repo_name | path | copies | size | content | license
---|---|---|---|---|---|
pydata/xarray | xarray/tests/test_dask.py | 1 | 59200 | import operator
import pickle
import sys
from contextlib import suppress
from distutils.version import LooseVersion
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Dataset, Variable
from xarray.core import duck_array_ops
from xarray.testing import assert_chunks_equal
from xarray.tests import mock
from ..core.duck_array_ops import lazy_array_equiv
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_frame_equal,
assert_identical,
raise_if_dask_computes,
requires_pint_0_15,
requires_scipy_or_netCDF4,
)
from .test_backends import create_tmp_file
dask = pytest.importorskip("dask")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
ON_WINDOWS = sys.platform == "win32"
def test_raise_if_dask_computes():
data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2))
with pytest.raises(RuntimeError, match=r"Too many computes"):
with raise_if_dask_computes():
data.compute()
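# Illustrative sketch, not part of the upstream test suite: raise_if_dask_computes
# also accepts a max_computes argument (used in the lazy_array_equiv tests further
# below), which tolerates up to that many compute calls before raising.
def example_raise_if_dask_computes_max_computes():
    data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2))
    with raise_if_dask_computes(max_computes=1):
        data.compute()  # a single compute is allowed; a second one would raise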
class DaskTestCase:
def assertLazyAnd(self, expected, actual, test):
with dask.config.set(scheduler="synchronous"):
test(actual, expected)
if isinstance(actual, Dataset):
for k, v in actual.variables.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, DataArray):
assert isinstance(actual.data, da.Array)
for k, v in actual.coords.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, Variable):
assert isinstance(actual.data, da.Array)
else:
assert False
class TestVariable(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.RandomState(0).randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(("x", "y"), self.values)
self.lazy_var = Variable(("x", "y"), self.data)
def test_basics(self):
v = self.lazy_var
assert self.data is v.data
assert self.data.chunks == v.chunks
assert_array_equal(self.values, v)
def test_copy(self):
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))
def test_chunk(self):
for chunks, expected in [
({}, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({"x": 3, "y": 3}, ((3, 1), (3, 3))),
({"x": 3}, ((3, 1), (2, 2, 2))),
({"x": (3, 1)}, ((3, 1), (2, 2, 2))),
]:
rechunked = self.lazy_var.chunk(chunks)
assert rechunked.chunks == expected
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
@pytest.mark.skipif(
LooseVersion(dask.__version__) < LooseVersion("2021.04.1"),
reason="Requires dask v2021.04.1 or later",
)
@pytest.mark.parametrize(
"expected_data, index",
[
(da.array([99, 2, 3, 4]), 0),
(da.array([99, 99, 99, 4]), slice(2, None, -1)),
(da.array([99, 99, 3, 99]), [0, -1, 1]),
(da.array([99, 99, 99, 4]), np.arange(3)),
(da.array([1, 99, 99, 99]), [False, True, True, True]),
(da.array([1, 99, 99, 99]), np.arange(4) > 0),
(da.array([99, 99, 99, 99]), Variable(("x"), da.array([1, 2, 3, 4])) > 0),
],
)
def test_setitem_dask_array(self, expected_data, index):
arr = Variable(("x"), da.array([1, 2, 3, 4]))
expected = Variable(("x"), expected_data)
arr[index] = 99
assert_identical(arr, expected)
@pytest.mark.skipif(
LooseVersion(dask.__version__) >= LooseVersion("2021.04.1"),
reason="Requires dask v2021.04.0 or earlier",
)
def test_setitem_dask_array_error(self):
with pytest.raises(TypeError, match=r"stored in a dask array"):
v = self.lazy_var
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
assert v.equals(v)
assert isinstance(v.data, da.Array)
assert v.identical(v)
assert isinstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_shift(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
assert v.data.chunks == v.shift(x=1).data.chunks
def test_roll(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
assert v.data.chunks == v.roll(x=1).data.chunks
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_repr(self):
expected = dedent(
"""\
<xarray.Variable (x: 4, y: 6)>
{!r}""".format(
self.lazy_var.data
)
)
assert expected == repr(self.lazy_var)
def test_pickle(self):
# Test that pickling/unpickling does not convert the dask
# backend to numpy
a1 = Variable(["x"], build_dask_array("x"))
a1.compute()
assert not a1._in_memory
assert kernel_call_count == 1
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 1
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
with raise_if_dask_computes():
actual = v.argmax(dim="x")
self.assertLazyAndAllClose(u.argmax(dim="x"), actual)
with raise_if_dask_computes():
actual = v.argmin(dim="x")
self.assertLazyAndAllClose(u.argmin(dim="x"), actual)
self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x"))
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median()
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median(v.dims)
with raise_if_dask_computes():
v.reduce(duck_array_ops.mean)
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable("x", values)
lazy_var = Variable("x", data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x"))
self.assertLazyAndIdentical(
u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]])
)
def test_missing_methods(self):
v = self.lazy_var
with pytest.raises(NotImplementedError, match=r"dask"):
    v.argsort()
with pytest.raises(NotImplementedError, match=r"dask"):
    v[0].item()
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
def test_compute(self):
u = self.eager_var
v = self.lazy_var
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_var
v = self.lazy_var + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, "meter")
variable = xr.Variable(("x", "y"), q)
token = dask.base.tokenize(variable)
post_op = variable + 5 * unit_registry.meter
assert dask.base.tokenize(variable) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(variable) == token
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
def assertLazyAndEqual(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_equal)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(
self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
self.lazy_array = DataArray(
self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
def test_rechunk(self):
chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2})
assert chunked.chunks == ((2,) * 2, (2,) * 3)
self.assertLazyAndIdentical(self.lazy_array, chunked)
def test_new_chunk(self):
chunked = self.eager_array.chunk()
assert chunked.data.name.startswith("xarray-<this-array>")
def test_lazy_dataset(self):
lazy_ds = Dataset({"foo": (("x", "y"), self.data)})
assert isinstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = xr.concat([v[:2], v[2:]], "x")
self.assertLazyAndAllClose(u, actual)
def test_compute(self):
u = self.eager_array
v = self.lazy_array
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_array
v = self.lazy_array + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
def test_concat_loads_variables(self):
# Test that concat() computes not-in-memory variables at most once
# and loads them in the output, while leaving the input unaltered.
d1 = build_dask_array("d1")
c1 = build_dask_array("c1")
d2 = build_dask_array("d2")
c2 = build_dask_array("c2")
d3 = build_dask_array("d3")
c3 = build_dask_array("c3")
# Note: c is a non-index coord.
# Index coords are loaded by IndexVariable.__init__.
ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)})
ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)})
ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)})
assert kernel_call_count == 0
out = xr.concat(
[ds1, ds2, ds3], dim="n", data_vars="different", coords="different"
)
# each kernel is computed exactly once
assert kernel_call_count == 6
# variables are loaded in the output
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all")
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"])
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[])
# variables are loaded once as we are validating that they're identical
assert kernel_call_count == 12
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat(
[ds1, ds2, ds3],
dim="n",
data_vars="different",
coords="different",
compat="identical",
)
# compat=identical doesn't do any more kernel calls than compat=equals
assert kernel_call_count == 18
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
# When the check for data_vars="different" fails halfway through,
# stop computing the remaining variables, as doing so has no benefit
ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])})
out = xr.concat(
[ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different"
)
# the variables of ds1 and ds2 were computed, but those of ds3 were not
assert kernel_call_count == 22
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
# the data of ds1 and ds2 was loaded into numpy and then
# concatenated to the data of ds3. Thus, only ds3 is computed now.
out.compute()
assert kernel_call_count == 24
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
# now check that concat() is correctly using dask name equality to skip loads
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars="different", coords="different"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="different",
compat="identical",
)
# c1,c3 must be computed for comparison since c2 is numpy;
# d2 is computed too
assert kernel_call_count == 28
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="all",
compat="identical",
)
# no extra computes
assert kernel_call_count == 30
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
def test_groupby(self):
u = self.eager_array
v = self.lazy_array
expected = u.groupby("x").mean(...)
with raise_if_dask_computes():
actual = v.groupby("x").mean(...)
self.assertLazyAndAllClose(expected, actual)
def test_rolling(self):
u = self.eager_array
v = self.lazy_array
expected = u.rolling(x=2).mean()
with raise_if_dask_computes():
actual = v.rolling(x=2).mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords["ab"] = ("x", ["a", "a", "b", "b"])
with pytest.raises(NotImplementedError, match=r"dask"):
v.groupby("ab").first()
expected = u.groupby("ab").first()
with raise_if_dask_computes():
actual = v.groupby("ab").first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array.assign_coords(y=range(6))
v = self.lazy_array.assign_coords(y=range(6))
for kwargs in [
{"x": [2, 3, 4]},
{"x": [1, 100, 2, 101, 3]},
{"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u["x"])
self.assertLazyAndEqual(expected, v.to_dataset("x").to_array("x"))
def test_merge(self):
def duplicate_and_merge(array):
return xr.merge([array, array.rename("bar")]).to_array()
expected = duplicate_and_merge(self.eager_array)
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndEqual(expected, actual)
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_where_dispatching(self):
a = np.arange(10)
b = a > 3
x = da.from_array(a, 5)
y = da.from_array(b, 5)
expected = DataArray(a).where(b)
self.assertLazyAndEqual(expected, DataArray(a).where(y))
self.assertLazyAndEqual(expected, DataArray(x).where(b))
self.assertLazyAndEqual(expected, DataArray(x).where(y))
def test_simultaneous_compute(self):
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
ds.load(scheduler=counting_get)
assert count[0] == 1
def test_stack(self):
data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
arr = DataArray(data, dims=("w", "x", "y"))
stacked = arr.stack(z=("x", "y"))
z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"])
assert stacked.data.chunks == expected.data.chunks
self.assertLazyAndEqual(expected, stacked)
def test_dot(self):
eager = self.eager_array.dot(self.eager_array[0])
lazy = self.lazy_array.dot(self.lazy_array[0])
self.assertLazyAndAllClose(eager, lazy)
@pytest.mark.skipif(LooseVersion(dask.__version__) >= "2.0", reason="no meta")
def test_dataarray_repr_legacy(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.DataArray 'data' (x: 1)>
{!r}
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x""".format(
data
)
)
assert expected == repr(a)
assert kernel_call_count == 0 # should not evaluate dask array
@pytest.mark.skipif(LooseVersion(dask.__version__) < "2.0", reason="needs meta")
def test_dataarray_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.DataArray 'data' (x: 1)>
{!r}
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x""".format(
data
)
)
assert expected == repr(a)
assert kernel_call_count == 0 # should not evaluate dask array
@pytest.mark.skipif(LooseVersion(dask.__version__) < "2.0", reason="needs meta")
def test_dataset_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.Dataset>
Dimensions: (x: 1)
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x
Data variables:
a (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>"""
)
assert expected == repr(ds)
assert kernel_call_count == 0 # should not evaluate dask array
def test_dataarray_pickle(self):
# Test that pickling/unpickling does not convert the dask backend
# to numpy in either the data variable or the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
a1.compute()
assert not a1._in_memory
assert not a1.coords["y"]._in_memory
assert kernel_call_count == 2
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 2
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
assert not a1.coords["y"]._in_memory
assert not a2.coords["y"]._in_memory
def test_dataset_pickle(self):
# Test that pickling/unpickling does not convert the dask backend
# to numpy in either the data variables or the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
ds1.compute()
assert not ds1["a"]._in_memory
assert not ds1["y"]._in_memory
assert kernel_call_count == 2
ds2 = pickle.loads(pickle.dumps(ds1))
assert kernel_call_count == 2
assert_identical(ds1, ds2)
assert not ds1["a"]._in_memory
assert not ds2["a"]._in_memory
assert not ds1["y"]._in_memory
assert not ds2["y"]._in_memory
def test_dataarray_getattr(self):
# ipython/jupyter does a long list of getattr() calls when trying to
# represent an object.
# Make sure we're not accidentally computing dask variables.
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(a, "NOTEXIST")
assert kernel_call_count == 0
def test_dataset_getattr(self):
# Like test_dataarray_getattr: make sure that a failed getattr() lookup
# does not accidentally compute the dask-backed data variables or non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(ds, "NOTEXIST")
assert kernel_call_count == 0
def test_values(self):
# Test that invoking the values property does not convert the dask
# backend to numpy
a = DataArray([1, 2]).chunk()
assert not a._in_memory
assert a.values.tolist() == [1, 2]
assert not a._in_memory
def test_from_dask_variable(self):
# Test array creation from Variable with dask backend.
# This is used e.g. in broadcast()
a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo")
self.assertLazyAndIdentical(self.lazy_array, a)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, unit_registry.meter)
data_array = xr.DataArray(
data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
token = dask.base.tokenize(data_array)
post_op = data_array + 5 * unit_registry.meter
assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(data_array) == token
class TestToDaskDataFrame:
def test_to_dask_dataframe(self):
# Test conversion of Datasets to dask DataFrames
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset(
{"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)}
)
expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
# test if 1-D index is correctly set up
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
# test if we have dask dataframes
assert isinstance(actual, dd.DataFrame)
# use the .equals from pandas to check dataframes are equivalent
assert_frame_equal(expected.compute(), actual.compute())
# test if no index is given
expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_2D(self):
# Test if 2-D dataset is supplied
w = np.random.randn(2, 3)
ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
# dask dataframes do not (yet) support a MultiIndex,
# but when they do, this would be the expected index:
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
)
expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index)
# so for now, reset the index
expected = expected.reset_index(drop=False)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_dask_dataframe_2D_set_index(self):
# This will fail until dask implements MultiIndex support
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({"w": (("x", "y"), w)})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_coordinates(self):
# Test if coordinate is also a dask array
x = np.random.randn(10)
t = np.arange(10) * 2
ds = Dataset(
{
"a": ("t", da.from_array(x, chunks=4)),
"t": ("t", da.from_array(t, chunks=4)),
}
)
expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t"))
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_not_daskarray(self):
# Test if DataArray is not a dask array
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_no_coordinate(self):
x = da.from_array(np.random.randn(10), chunks=4)
ds = Dataset({"x": ("dim_0", x)})
expected = ds.compute().to_dataframe().reset_index()
actual = ds.to_dask_dataframe()
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({"w": (("x", "y"), values)}).chunk(1)
expected = ds["w"].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["x", "y"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds["w"].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["y", "x"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with pytest.raises(ValueError, match=r"does not match the set of dimensions"):
ds.to_dask_dataframe(dim_order=["x"])
@pytest.mark.parametrize("method", ["load", "compute"])
def test_dask_kwargs_variable(method):
x = Variable("y", da.from_array(np.arange(3), chunks=(2,)))
# args should be passed on to da.Array.compute()
with mock.patch.object(
da.Array, "compute", return_value=np.arange(3)
) as mock_compute:
getattr(x, method)(foo="bar")
mock_compute.assert_called_with(foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({"x": (("y"), data)})
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
kernel_call_count = 0
def kernel(name):
"""Dask kernel to test pickling/unpickling and __repr__.
Must be global to make it pickleable.
"""
global kernel_call_count
kernel_call_count += 1
return np.ones(1, dtype=np.int64)
def build_dask_array(name):
global kernel_call_count
kernel_call_count = 0
return dask.array.Array(
dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64
)
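# Illustrative sketch, not part of the upstream test suite: build_dask_array resets
# the global kernel_call_count, and each chunk computation then increments it exactly
# once, which is what the pickle/repr/concat tests above rely on.
def example_kernel_call_counting():
    arr = build_dask_array("example")
    assert kernel_call_count == 0  # building the graph does not run the kernel
    arr.compute()
    assert kernel_call_count == 1  # the single one-chunk compute runs the kernel once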
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_Dataset(persist):
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
ds = ds + 1
n = len(ds.foo.data.dask)
ds2 = persist(ds)
assert len(ds2.foo.data.dask) == 1
assert len(ds.foo.data.dask) == n # doesn't mutate in place
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_DataArray(persist):
x = da.arange(10, chunks=(5,))
y = DataArray(x)
z = y + 1
n = len(z.data.dask)
zz = persist(z)
assert len(z.data.dask) == n
assert len(zz.data.dask) == zz.data.npartitions
def test_dataarray_with_dask_coords():
import toolz
x = xr.Variable("x", da.arange(8, chunks=(4,)))
y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2)
data = da.random.random((8, 8), chunks=(4, 4)) + 1
array = xr.DataArray(data, dims=["x", "y"])
array.coords["xx"] = x
array.coords["yy"] = y
assert dict(array.__dask_graph__()) == toolz.merge(
data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()
)
(array2,) = dask.compute(array)
assert not dask.is_dask_collection(array2)
assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
def test_basic_compute():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2})
for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:
with dask.config.set(scheduler=get):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
def test_dask_layers_and_dependencies():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
x = dask.delayed(ds)
assert set(x.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
assert set(x.foo.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
def make_da():
da = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(100, 120)},
name="a",
).chunk({"x": 4, "y": 5})
da.x.attrs["long_name"] = "x"
da.attrs["test"] = "test"
da.coords["c2"] = 0.5
da.coords["ndcoord"] = da.x * 2
da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5})
return da
def make_ds():
map_ds = xr.Dataset()
map_ds["a"] = make_da()
map_ds["b"] = map_ds.a + 50
map_ds["c"] = map_ds.x + 20
map_ds = map_ds.chunk({"x": 4, "y": 5})
map_ds["d"] = ("z", [1, 1, 1, 1])
map_ds["z"] = [0, 1, 2, 3]
map_ds["e"] = map_ds.x + map_ds.y
map_ds.coords["c1"] = 0.5
map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x)))
map_ds.coords["cx"].attrs["test2"] = "test2"
map_ds.attrs["test"] = "test"
map_ds.coords["xx"] = map_ds["a"] * map_ds.y
map_ds.x.attrs["long_name"] = "x"
map_ds.y.attrs["long_name"] = "y"
return map_ds
# fixtures cannot be used in parametrize statements
# instead use this workaround
# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
@pytest.fixture
def map_da():
return make_da()
@pytest.fixture
def map_ds():
return make_ds()
def test_unify_chunks(map_ds):
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
ds_copy.chunks
expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)}
with raise_if_dask_computes():
actual_chunks = ds_copy.unify_chunks().chunks
assert actual_chunks == expected_chunks
assert_identical(map_ds, ds_copy.unify_chunks())
out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy"))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == expected_chunks
# Test unordered dims
da = ds_copy["cxy"]
out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1}))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))
# Test mismatch
with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"):
xr.unify_chunks(da, da.isel(x=slice(2)))
@pytest.mark.parametrize("obj", [make_ds(), make_da()])
@pytest.mark.parametrize(
"transform", [lambda x: x.compute(), lambda x: x.unify_chunks()]
)
def test_unify_chunks_shallow_copy(obj, transform):
obj = transform(obj)
unified = obj.unify_chunks()
assert_identical(obj, unified)
assert obj is not unified
@pytest.mark.parametrize("obj", [make_da()])
def test_auto_chunk_da(obj):
actual = obj.chunk("auto").data
expected = obj.data.rechunk("auto")
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_map_blocks_error(map_da, map_ds):
def bad_func(darray):
return (darray * darray.x + 5 * darray.y)[:1, :1]
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(bad_func, map_da).compute()
def returns_numpy(darray):
return (darray * darray.x + 5 * darray.y).values
with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"):
xr.map_blocks(returns_numpy, map_da)
with pytest.raises(TypeError, match=r"args must be"):
xr.map_blocks(operator.add, map_da, args=10)
with pytest.raises(TypeError, match=r"kwargs must be"):
xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])
def really_bad_func(darray):
raise ValueError("couldn't do anything.")
with pytest.raises(Exception, match=r"Cannot infer"):
xr.map_blocks(really_bad_func, map_da)
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
xr.map_blocks(bad_func, ds_copy)
with pytest.raises(TypeError, match=r"Cannot pass dask collections"):
xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj)
expected = func(obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_convert_args_to_list(obj):
expected = obj + 10
with raise_if_dask_computes():
actual = xr.map_blocks(operator.add, obj, [10])
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_dask_args():
da1 = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(20)},
).chunk({"x": 5, "y": 4})
# check that block shapes are the same
def sumda(da1, da2):
assert da1.shape == da2.shape
return da1 + da2
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(sumda, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# one dimension in common
da2 = (da1 + 1).isel(x=1, drop=True)
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# test that everything works when dimension names are different
da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"})
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"):
xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})])
with pytest.raises(ValueError, match=r"indexes along dimension 'x' are not equal"):
xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])
# reduction
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2])
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
# reduction with template
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(
lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x")
)
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_add_attrs(obj):
def add_attrs(obj):
obj = obj.copy(deep=True)
obj.attrs["new"] = "new"
obj.cxy.attrs["new2"] = "new2"
return obj
expected = add_attrs(obj)
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj)
assert_identical(actual, expected)
# when template is specified, attrs are copied from template, not set by function
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj, template=obj)
assert_identical(actual, obj)
def test_map_blocks_change_name(map_da):
def change_name(obj):
obj = obj.copy(deep=True)
obj.name = "new"
return obj
expected = change_name(map_da)
with raise_if_dask_computes():
actual = xr.map_blocks(change_name, map_da)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_kwargs(obj):
expected = xr.full_like(obj, fill_value=np.nan)
with raise_if_dask_computes():
actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_to_array(map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(lambda x: x.to_array(), map_ds)
# to_array does not preserve name, so cannot use assert_identical
assert_equal(actual, map_ds.to_array())
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.to_dataset(),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)),
lambda x: x.astype(np.int32),
lambda x: x.x,
],
)
def test_map_blocks_da_transformations(func, map_da):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_da)
assert_identical(actual, func(map_da))
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.drop_vars("cxy"),
lambda x: x.drop_vars("a"),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.rename({"a": "new1", "b": "new2"}),
lambda x: x.x,
],
)
def test_map_blocks_ds_transformations(func, map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_ds)
assert_identical(actual, func(map_ds))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_da_ds_with_template(obj):
func = lambda x: x.isel(x=[1])
template = obj.isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, template=template)
assert_identical(actual, template)
with raise_if_dask_computes():
actual = obj.map_blocks(func, template=template)
assert_identical(actual, template)
def test_map_blocks_template_convert_object():
da = make_da()
func = lambda x: x.to_dataset().isel(x=[1])
template = da.to_dataset().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, da, template=template)
assert_identical(actual, template)
ds = da.to_dataset()
func = lambda x: x.to_array().isel(x=[1])
template = ds.to_array().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, ds, template=template)
assert_identical(actual, template)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_errors_bad_template(obj):
with pytest.raises(ValueError, match=r"unexpected coordinate variables"):
xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"does not contain coordinate variables"):
xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"):
xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
with pytest.raises(TypeError, match=r"must be a DataArray"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()
with pytest.raises(ValueError, match=r"map_blocks requires that one block"):
xr.map_blocks(
lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])
).compute()
with pytest.raises(ValueError, match=r"Expected index 'x' to be"):
xr.map_blocks(
lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values
obj,
template=obj.isel(x=[1, 5, 9]),
).compute()
def test_map_blocks_errors_bad_template_2(map_ds):
with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"):
xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute()
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_object_method(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
expected = xr.map_blocks(func, obj)
actual = obj.map_blocks(func)
assert_identical(expected, actual)
def test_map_blocks_hlg_layers():
# regression test for #3599
ds = xr.Dataset(
{
"x": (("a",), dask.array.ones(10, chunks=(5,))),
"z": (("b",), dask.array.ones(10, chunks=(5,))),
}
)
mapped = ds.map_blocks(lambda x: x)
xr.testing.assert_equal(mapped, ds)
def test_make_meta(map_ds):
from ..core.parallel import make_meta
meta = make_meta(map_ds)
for variable in map_ds._coord_names:
assert variable in meta._coord_names
assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim
for variable in map_ds.data_vars:
assert variable in meta.data_vars
assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim
def test_identical_coords_no_computes():
lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
a = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
b = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
with raise_if_dask_computes():
c = a + b
assert_identical(c, a)
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
@pytest.mark.parametrize(
"transform",
[
lambda x: x.reset_coords(),
lambda x: x.reset_coords(drop=True),
lambda x: x.isel(x=1),
lambda x: x.attrs.update(new_attrs=1),
lambda x: x.assign_coords(cxy=1),
lambda x: x.rename({"x": "xnew"}),
lambda x: x.rename({"cxy": "cxynew"}),
],
)
def test_token_changes_on_transform(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
def test_token_changes_when_data_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
# Change data_var
if isinstance(obj, DataArray):
obj *= 2
else:
obj["a"] *= 2
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
# Change non-index coord
obj.coords["ndcoord"] *= 2
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
# Change IndexVariable
obj = obj.assign_coords(x=obj.x * 2)
with raise_if_dask_computes():
t4 = dask.base.tokenize(obj)
assert t4 != t3
@pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()])
def test_token_changes_when_buffer_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
if isinstance(obj, DataArray):
obj[0, 0] = 123
else:
obj["a"][0, 0] = 123
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
obj.coords["ndcoord"][0] = 123
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
@pytest.mark.parametrize(
"transform",
[lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]])
def test_token_identical(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)
def test_recursive_token():
"""Test that tokenization is invoked recursively, and doesn't just rely on the
output of str()
"""
a = np.ones(10000)
b = np.ones(10000)
b[5000] = 2
assert str(a) == str(b)
assert dask.base.tokenize(a) != dask.base.tokenize(b)
# Test DataArray and Variable
da_a = DataArray(a)
da_b = DataArray(b)
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
# Test Dataset
ds_a = da_a.to_dataset(name="x")
ds_b = da_b.to_dataset(name="x")
assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)
# Test IndexVariable
da_a = DataArray(a, dims=["x"], coords={"x": a})
da_b = DataArray(a, dims=["x"], coords={"x": b})
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
@requires_scipy_or_netCDF4
def test_normalize_token_with_backend(map_ds):
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
map_ds.to_netcdf(tmp_file)
read = xr.open_dataset(tmp_file)
assert dask.base.tokenize(map_ds) != dask.base.tokenize(read)
read.close()
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_variables(compat):
var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2))
with raise_if_dask_computes():
assert getattr(var1, compat)(var2, equiv=lazy_array_equiv)
# values are actually equal, but we don't know that till we compute, return None
with raise_if_dask_computes():
assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None
# shapes are not equal, return False without computes
with raise_if_dask_computes():
assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False
# if one or both arrays are numpy, return None
assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None
assert (
getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None
)
with raise_if_dask_computes():
assert getattr(var1, compat)(var2.transpose("y", "x"))
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_merge(compat):
da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x"))
with raise_if_dask_computes():
xr.merge([da1, da2], compat=compat)
# shapes are not equal; no computes necessary
with raise_if_dask_computes(max_computes=0):
with pytest.raises(ValueError):
xr.merge([da1, da3], compat=compat)
with raise_if_dask_computes(max_computes=2):
xr.merge([da1, da2 / 2], compat=compat)
@pytest.mark.filterwarnings("ignore::FutureWarning") # transpose_coords
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
@pytest.mark.parametrize(
"transform",
[
lambda a: a.assign_attrs(new_attr="anew"),
lambda a: a.assign_coords(cxy=a.cxy),
lambda a: a.copy(),
lambda a: a.isel(x=np.arange(a.sizes["x"])),
lambda a: a.isel(x=slice(None)),
lambda a: a.loc[dict(x=slice(None))],
lambda a: a.loc[dict(x=np.arange(a.sizes["x"]))],
lambda a: a.loc[dict(x=a.x)],
lambda a: a.sel(x=a.x),
lambda a: a.sel(x=a.x.values),
lambda a: a.transpose(...),
lambda a: a.squeeze(), # no dimensions to squeeze
lambda a: a.sortby("x"), # "x" is already sorted
lambda a: a.reindex(x=a.x),
lambda a: a.reindex_like(a),
lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}),
lambda a: a.pipe(lambda x: x),
lambda a: xr.align(a, xr.zeros_like(a))[0],
# assign
# swap_dims
# set_index / reset_index
],
)
def test_transforms_pass_lazy_array_equiv(obj, transform):
with raise_if_dask_computes():
assert_equal(obj, transform(obj))
def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds):
with raise_if_dask_computes():
assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy)
assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy)
assert_equal(map_ds.map(lambda x: x), map_ds)
assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds)
assert_equal(map_ds.update({"a": map_ds.a}), map_ds)
# fails because of index error
# assert_equal(
# map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds
# )
assert_equal(
map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds
)
assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da)
assert_equal(map_da.astype(map_da.dtype), map_da)
assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy)
def test_optimize():
# https://github.com/pydata/xarray/issues/3698
a = dask.array.ones((10, 4), chunks=(5, 2))
arr = xr.DataArray(a).chunk(5)
(arr2,) = dask.optimize(arr)
arr2.compute()
# The graph_manipulation module has been in dask since 2021.2, but it became usable
# with xarray only since 2021.3
@pytest.mark.skipif(LooseVersion(dask.__version__) <= "2021.02.0", reason="new module")
def test_graph_manipulation():
"""dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder
function returned by __dask_postperist__; also, the dsk passed to the rebuilder is
a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.
"""
import dask.graph_manipulation as gm
v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2
da = DataArray(v)
ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])})
v2, da2, ds2 = gm.clone(v, da, ds)
assert_equal(v2, v)
assert_equal(da2, da)
assert_equal(ds2, ds)
for a, b in ((v, v2), (da, da2), (ds, ds2)):
assert a.__dask_layers__() != b.__dask_layers__()
assert len(a.__dask_layers__()) == len(b.__dask_layers__())
assert a.__dask_graph__().keys() != b.__dask_graph__().keys()
assert len(a.__dask_graph__()) == len(b.__dask_graph__())
assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()
assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)
# Above we performed a slice operation; adding the two slices back together creates
# a diamond-shaped dependency graph, which in turn will trigger a collision in layer
# names if we were to use HighLevelGraph.cull() instead of
# HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)
| apache-2.0 |
stscieisenhamer/glue | glue/viewers/image/layer_artist.py | 1 | 13979 | from __future__ import absolute_import, division, print_function
import uuid
import weakref
import numpy as np
from glue.utils import defer_draw
from glue.viewers.image.state import ImageLayerState, ImageSubsetLayerState
from glue.viewers.matplotlib.layer_artist import MatplotlibLayerArtist
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import color2rgb
from glue.core.link_manager import is_equivalent_cid
from glue.core import Data, HubListener
from glue.core.message import ComponentsChangedMessage
from glue.external.modest_image import imshow
class BaseImageLayerArtist(MatplotlibLayerArtist, HubListener):
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(BaseImageLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
self.reset_cache()
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update_image)
self.state.add_global_callback(self._update_image)
self.layer.hub.subscribe(self, ComponentsChangedMessage,
handler=self._update_compatibility,
filter=self._is_data_object)
self._update_compatibility()
def _is_data_object(self, message):
if isinstance(self.layer, Data):
return message.sender is self.layer
else:
return message.sender is self.layer.data
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
def _update_image(self, force=False, **kwargs):
raise NotImplementedError()
@defer_draw
def _update_compatibility(self, *args, **kwargs):
"""
Determine compatibility of data with reference data. For the data to be
compatible with the reference data, the number of dimensions has to
match and the pixel component IDs have to be equivalent.
"""
if self._viewer_state.reference_data is None:
self._compatible_with_reference_data = False
self.disable('No reference data defined')
return
if self.layer is self._viewer_state.reference_data:
self._compatible_with_reference_data = True
self.enable()
return
# Check whether the pixel component IDs of the dataset are equivalent
# to that of the reference dataset. In future this is where we could
# allow for these to be different and implement reprojection.
if self.layer.ndim != self._viewer_state.reference_data.ndim:
self._compatible_with_reference_data = False
self.disable('Data dimensions do not match reference data')
return
# Determine whether pixel component IDs are equivalent
pids = self.layer.pixel_component_ids
pids_ref = self._viewer_state.reference_data.pixel_component_ids
if isinstance(self.layer, Data):
data = self.layer
else:
data = self.layer.data
for i in range(data.ndim):
if not is_equivalent_cid(data, pids[i], pids_ref[i]):
self._compatible_with_reference_data = False
self.disable('Pixel component IDs do not match. You can try '
'fixing this by linking the pixel component IDs '
'of this dataset with those of the reference '
'dataset.')
return
self._compatible_with_reference_data = True
self.enable()
class ImageLayerArtist(BaseImageLayerArtist):
_layer_state_cls = ImageLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(ImageLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
# We use a custom object to deal with the compositing of images, and we
# store it as a private attribute of the axes to make sure it is
# accessible for all layer artists.
self.uuid = str(uuid.uuid4())
self.composite = self.axes._composite
self.composite.allocate(self.uuid)
self.composite.set(self.uuid, array=self.get_image_data,
shape=self.get_image_shape)
self.composite_image = self.axes._composite_image
def get_layer_color(self):
if self._viewer_state.color_mode == 'One color per layer':
return self.state.color
else:
return self.state.cmap
def enable(self):
if hasattr(self, 'composite_image'):
self.composite_image.invalidate_cache()
super(ImageLayerArtist, self).enable()
def remove(self):
super(ImageLayerArtist, self).remove()
self.composite.deallocate(self.uuid)
def get_image_shape(self):
if not self._compatible_with_reference_data:
return None
if self._viewer_state.x_att is None or self._viewer_state.y_att is None:
return None
x_axis = self._viewer_state.x_att.axis
y_axis = self._viewer_state.y_att.axis
full_shape = self.layer.shape
return full_shape[y_axis], full_shape[x_axis]
def get_image_data(self, view=None):
if not self._compatible_with_reference_data:
return None
try:
image = self.state.get_sliced_data(view=view)
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self.state.attribute)
return None
else:
self.enable()
return image
def _update_image_data(self):
self.composite_image.invalidate_cache()
self.redraw()
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
if self._viewer_state.color_mode == 'Colormaps':
color = self.state.cmap
else:
color = self.state.color
self.composite.set(self.uuid,
clim=(self.state.v_min, self.state.v_max),
visible=self.state.visible,
zorder=self.state.zorder,
color=color,
contrast=self.state.contrast,
bias=self.state.bias,
alpha=self.state.alpha,
stretch=self.state.stretch)
self.composite_image.invalidate_cache()
self.redraw()
@defer_draw
def _update_image(self, force=False, **kwargs):
if self.state.attribute is None or self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this, but currently this method is called multiple times if an
# attribute is changed due to x_att changing, then other dependent state, etc.
# If we can solve this so that _update_image is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if 'reference_data' in changed or 'layer' in changed:
self._update_compatibility()
if force or any(prop in changed for prop in ('layer', 'attribute',
'slices', 'x_att', 'y_att')):
self._update_image_data()
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('v_min', 'v_max', 'contrast',
'bias', 'alpha', 'color_mode',
'cmap', 'color', 'zorder',
'visible', 'stretch')):
self._update_visual_attributes()
@defer_draw
def update(self):
self._update_image(force=True)
# Reset the axes stack so that pressing the home button doesn't go back
# to a previous irrelevant view.
self.axes.figure.canvas.toolbar.update()
self.redraw()
class ImageSubsetArray(object):
def __init__(self, viewer_state, layer_artist):
self._viewer_state = weakref.ref(viewer_state)
self._layer_artist = weakref.ref(layer_artist)
self._layer_state = weakref.ref(layer_artist.state)
@property
def layer_artist(self):
return self._layer_artist()
@property
def layer_state(self):
return self._layer_state()
@property
def viewer_state(self):
return self._viewer_state()
@property
def shape(self):
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
full_shape = self.layer_state.layer.shape
return full_shape[y_axis], full_shape[x_axis]
@property
def nan_array(self):
return np.ones(self.shape) * np.nan
def __getitem__(self, view=None):
if (self.layer_artist is None or
self.layer_state is None or
self.viewer_state is None):
return self.nan_array
if not self.layer_artist._compatible_with_reference_data:
return self.nan_array
try:
mask = self.layer_state.get_sliced_data(view=view)
except IncompatibleAttribute:
self.layer_artist.disable_incompatible_subset()
return self.nan_array
else:
self.layer_artist.enable()
r, g, b = color2rgb(self.layer_state.color)
mask = np.dstack((r * mask, g * mask, b * mask, mask * .5))
mask = (255 * mask).astype(np.uint8)
return mask
@property
def dtype(self):
return np.uint8
@property
def ndim(self):
return 2
@property
def size(self):
return np.product(self.shape)
class ImageSubsetLayerArtist(BaseImageLayerArtist):
_layer_state_cls = ImageSubsetLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(ImageSubsetLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
self.subset_array = ImageSubsetArray(self._viewer_state, self)
self.image_artist = imshow(self.axes, self.subset_array,
origin='lower', interpolation='nearest',
vmin=0, vmax=1, aspect=self._viewer_state.aspect)
self.mpl_artists = [self.image_artist]
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
# TODO: deal with color using a colormap instead of having to change data
self.image_artist.set_visible(self.state.visible)
self.image_artist.set_zorder(self.state.zorder)
self.image_artist.set_alpha(self.state.alpha)
self.redraw()
def _update_image(self, force=False, **kwargs):
if self.state.layer is None:
return
        # Figure out which attributes are different from before. Ideally we
        # shouldn't need this, but currently this method is called multiple times
        # when a single attribute change (e.g. x_att) cascades into further state
        # changes. If we can ensure _update_image is really only called once, we
        # could consider simplifying this. Until then, we manually keep track of
        # which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if 'reference_data' in changed or 'layer' in changed:
self._update_compatibility()
if force or any(prop in changed for prop in ('layer', 'attribute', 'color',
'x_att', 'y_att', 'slices')):
self.image_artist.invalidate_cache()
self.redraw() # forces subset to be recomputed
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('zorder', 'visible', 'alpha')):
self._update_visual_attributes()
def enable(self):
super(ImageSubsetLayerArtist, self).enable()
        # We now need to ensure that image_artist, which may have been marked
        # as not visible when the layer was cleared, is made visible again.
if hasattr(self, 'image_artist'):
self.image_artist.invalidate_cache()
self._update_visual_attributes()
@defer_draw
def update(self):
# TODO: determine why this gets called when changing the transparency slider
self._update_image(force=True)
self.redraw()
| bsd-3-clause |
Joukahainen/trading-with-python | lib/interactivebrokers.py | 77 | 18140 | """
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
""" contract factory function """
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = secType
contract.m_exchange = exchange
contract.m_currency = currency
return contract
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
"""
parse trade log in a csv file produced by IB 'Activity Flex Query'
the file should contain these columns:
['Symbol','TradeDate','Quantity','TradePrice','IBCommission']
Returns:
A DataFrame with parsed trade data
"""
import csv
rows = []
with open(fName, 'rb') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']
types = dict(zip(header, [_str2datetime, str, int, float, float]))
idx = dict(zip(header, [rows[0].index(h) for h in header]))
data = dict(zip(header, [[] for h in header]))
for row in rows[1:]:
print row
for col in header:
val = types[col](row[idx[col]])
data[col].append(val)
return DataFrame(data)[header].sort(column='TradeDate')
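# Example input for readActivityFlex (illustrative only -- the filename and numbers
# below are made up). Columns are located by name in the header row, so their order
# does not matter; TradeDate must use the YYYYMMDD format expected by _str2datetime:
#
#   Symbol,TradeDate,Quantity,TradePrice,IBCommission
#   SPY,20140102,100,183.52,-1.0
#   SPY,20140103,-100,182.90,-1.0
#
#   df = readActivityFlex('flexReport.csv')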
class Subscriptions(DataFrameModel, Sender):
""" a data table containing price & subscription data """
def __init__(self, tws=None):
super(Subscriptions, self).__init__()
self.df = DataFrame() # this property holds the data in a table format
self._nextId = 1
self._id2symbol = {} # id-> symbol lookup dict
self._header = ['id', 'position', 'bid', 'ask', 'last'] # columns of the _data table
# register callbacks
if tws is not None:
tws.register(self.priceHandler, message.TickPrice)
tws.register(self.accountHandler, message.UpdatePortfolio)
def add(self, symbol, subId=None):
"""
Add a subscription to data table
return : subscription id
"""
if subId is None:
subId = self._nextId
data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
row = DataFrame(data, index=Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
self._nextId = subId + 1
self._rebuildIndex()
self.emit(SIGNAL("layoutChanged()"))
return subId
def priceHandler(self, msg):
""" handler function for price updates. register this with ibConnection class """
if priceTicks[msg.field] not in self._header: # do nothing for ticks that are not in _data table
return
self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price
#notify viewer
col = self._header.index(priceTicks[msg.field])
row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
idx = self.createIndex(row, col)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)
def accountHandler(self, msg):
if msg.contract.m_symbol in self.df.index.tolist():
self.df['position'][msg.contract.m_symbol] = msg.position
def _rebuildIndex(self):
""" udate lookup dictionary id-> symbol """
symbols = self.df.index.tolist()
ids = self.df['id'].values.tolist()
self._id2symbol = dict(zip(ids, symbols))
def __repr__(self):
return str(self.df)
class Broker(object):
"""
Broker class acts as a wrapper around ibConnection
from ibPy. It tracks current subscriptions and provides
    data models to viewers.
"""
def __init__(self, name='broker'):
""" initialize broker class
"""
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self.tws = ibConnection() # tws interface
self.nextValidOrderId = None
self.dataModel = Subscriptions(self.tws) # data container
self.tws.registerAll(self.defaultHandler)
#self.tws.register(self.debugHandler,message.TickPrice)
self.tws.register(self.nextValidIdHandler, 'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True, '')
def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
""" subscribe to stock data """
self.log.debug('Subscribing to ' + symbol)
# if symbol in self.data.symbols:
# print 'Already subscribed to {0}'.format(symbol)
# return
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self.dataModel.add(symbol)
self.tws.reqMktData(subId, c, '', False)
self.contracts[symbol] = c
return subId
@property
def data(self):
return self.dataModel.df
def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
""" place an order on already subscribed contract """
if symbol not in self.contracts.keys():
self.log.error("Can't place order, not subscribed to %s" % symbol)
return
action = {-1: 'SELL', 1: 'BUY'}
o = Order()
o.m_orderId = self.getOrderId()
o.m_action = action[cmp(shares, 0)]
o.m_totalQuantity = abs(shares)
o.m_transmit = transmit
if limit is not None:
o.m_orderType = 'LMT'
o.m_lmtPrice = limit
self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))
self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)
def getOrderId(self):
self.nextValidOrderId += 1
return self.nextValidOrderId - 1
def unsubscribeStk(self, symbol):
self.log.debug('Function not implemented')
def disconnect(self):
self.tws.disconnect()
def __del__(self):
"""destructor, clean up """
print 'Broker is cleaning up after itself.'
self.tws.disconnect()
def debugHandler(self, msg):
print msg
def defaultHandler(self, msg):
""" default message handler """
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def nextValidIdHandler(self, msg):
self.nextValidOrderId = msg.orderId
self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))
def saveData(self, fname):
""" save current dataframe to csv """
self.log.debug("Saving data to {0}".format(fname))
self.dataModel.df.to_csv(fname)
# def __getattr__(self, name):
# """ x.__getattr__('name') <==> x.name
# an easy way to call ibConnection methods
# @return named attribute from instance tws
# """
# return getattr(self.tws, name)
class _HistDataHandler(object):
""" handles incoming messages """
def __init__(self, tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler, message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}
def msgHandler(self, msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
""" return downloaded data as a DataFrame """
df = DataFrame(data=self._data, index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self, debug=False):
self._log = logger.getLogger('DLD')
self._log.debug(
            'Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler, message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self, msg):
print '[debug]', msg
def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
useRTH=1, formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
formatDate)
self._reqId += 1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self, contract, dateTuple):
""" get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
"""
openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime, closeTime, freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract, t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
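# Example usage of Downloader (an illustrative sketch only; it assumes a TWS or
# IB Gateway session is running and accepting API connections):
#
#   dl = Downloader(debug=False)
#   df = dl.getIntradayData(createContract('SPY'), (2014, 1, 2))
#   dl.disconnect()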
class TimeKeeper(object):
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~') + '/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
""" adds a timestamp of current request"""
with open(self.dataFile, 'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat) + '\n')
def nrRequests(self, timeSpan=600):
""" return number of requests in past timespan (s) """
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile, 'r') as f:
lines = f.readlines()
for line in lines:
if now - dt.datetime.strptime(line.strip(), self._timeFormat) < delta:
requests += 1
if requests == 0: # erase all contents if no requests are relevant
open(self.dataFile, 'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
#---------------test functions-----------------
def dummyHandler(msg):
print msg
def testConnection():
""" a simple test to check working of streaming prices etc """
tws = ibConnection()
tws.registerAll(dummyHandler)
tws.connect()
c = createContract('SPY')
tws.reqMktData(1, c, '', False)
sleep(3)
print 'testConnection done.'
def testSubscriptions():
s = Subscriptions()
s.add('SPY')
#s.add('XLE')
print s
def testBroker():
b = Broker()
sleep(2)
b.subscribeStk('SPY')
b.subscribeStk('XLE')
b.subscribeStk('GOOG')
b.placeOrder('ABC', 125, 55.1)
sleep(3)
return b
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
def __init__(self, parent=None):
super(AddSubscriptionDlg, self).__init__(parent)
symbolLabel = QLabel('Symbol')
self.symbolEdit = QLineEdit()
secTypeLabel = QLabel('secType')
self.secTypeEdit = QLineEdit('STK')
exchangeLabel = QLabel('exchange')
self.exchangeEdit = QLineEdit('SMART')
currencyLabel = QLabel('currency')
self.currencyEdit = QLineEdit('USD')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
lay = QGridLayout()
lay.addWidget(symbolLabel, 0, 0)
lay.addWidget(self.symbolEdit, 0, 1)
lay.addWidget(secTypeLabel, 1, 0)
lay.addWidget(self.secTypeEdit, 1, 1)
lay.addWidget(exchangeLabel, 2, 0)
lay.addWidget(self.exchangeEdit, 2, 1)
lay.addWidget(currencyLabel, 3, 0)
lay.addWidget(self.currencyEdit, 3, 1)
lay.addWidget(buttonBox, 4, 0, 1, 2)
self.setLayout(lay)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
def __init__(self, broker, parent=None):
super(BrokerWidget, self).__init__(parent)
self.broker = broker
self.dataTable = TableView()
self.dataTable.setModel(self.broker.dataModel)
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
#self.dataTable.resizeColumnsToContents()
dataLabel = QLabel('Price Data')
dataLabel.setBuddy(self.dataTable)
dataLayout = QVBoxLayout()
dataLayout.addWidget(dataLabel)
dataLayout.addWidget(self.dataTable)
addButton = QPushButton("&Add Symbol")
saveDataButton = QPushButton("&Save Data")
#deleteButton = QPushButton("&Delete")
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(addButton)
buttonLayout.addWidget(saveDataButton)
buttonLayout.addStretch()
layout = QHBoxLayout()
layout.addLayout(dataLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
#self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)
def addSubscription(self):
dialog = AddSubscriptionDlg(self)
if dialog.exec_():
self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))
def saveData(self):
""" save data to a .csv file """
fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
if fname:
self.broker.saveData(fname)
# def deleteSubscription(self):
# pass
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(640, 480)
self.setWindowTitle('Broker test')
self.broker = Broker()
self.broker.subscribeStk('SPY')
self.broker.subscribeStk('XLE')
self.broker.subscribeStk('GOOG')
brokerWidget = BrokerWidget(self.broker, self)
lay = QVBoxLayout()
lay.addWidget(brokerWidget)
self.setLayout(lay)
def startGui():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
import ib
    print 'ibpy version:', ib.version
#testConnection()
#testBroker()
#testSubscriptions()
print message.messageTypeNames()
startGui()
print 'All done'
| bsd-3-clause |
PlummerLab/2015-05-05-AvrRvi5_candidate_transcript_validation | lib/draw_wrappers.py | 1 | 4173 | from gene_shapes import Triangle
from gene_shapes import OpenTriangle
import matplotlib.patches as patches
from matplotlib.path import Path
from matplotlib.text import Text
def draw_region(
seq,
start=None,
end=None,
intron_threshold=1000,
exon=Triangle(width=1),
intron=OpenTriangle(width=0.5, y_offset=0.5),
other_shapes=dict(),
names_to_print=dict(),
):
"""
Keyword arguments:
    names_to_print -- dict keyed by feature id; each value is a dict of
        matplotlib text keyword arguments. Only features whose id appears as a
        key get a text entry ('s' defaults to the feature id, and 'x' is set to
        the feature's midpoint).
"""
if start is None:
start = 0
if end is None:
end = len(seq)
feature_patches = list()
text_patches = list()
for feature in seq[start:end].features:
if feature.id in names_to_print:
if 's' not in names_to_print[feature.id]:
names_to_print[feature.id]['s'] = feature.id
start = feature.location.start
end = feature.location.end
names_to_print[feature.id]['x'] = start + 0.5 * (end - start)
text_patches.append(names_to_print[feature.id])
if feature.type == 'CDS' and exon is not None:
exons = list()
introns = list()
reverse = feature.strand == -1
parts = sorted(feature.location.parts, key=lambda f: min(f.start, f.end), reverse=reverse)
for i in range(len(parts)):
strand = parts[i].strand
                # Draw the intron connecting back to the previous exon (skipped for the first exon)
if i > 0 and intron is not None:
if strand in {None, 0, 1}:
strand = 1
start = parts[i - 1].end
distance = (parts[i].start - parts[i - 1].end)
else:
strand = -1
start = parts[i - 1].start
distance = (parts[i].end - parts[i - 1].start)
if abs(distance) >= intron_threshold:
incl_intron = True
verts, codes = intron(
start,
0.,
distance
)
introns.append([start, distance])
p = Path(verts, codes)
feature_patches.append(
patches.PathPatch(
p,
**intron.properties
)
)
else:
incl_intron = False
# Now draw the exon
start = parts[i].start
if strand in {None, 0, 1}:
strand = 1
start = parts[i].start
else:
strand = -1
start = parts[i].end
distance = (parts[i].end - parts[i].start) * strand
if len(exons) == 0 or incl_intron:
exons.append([start, distance])
else: # Join two exons
exons[-1][1] += distance
for e in exons:
verts, codes = exon(e[0], 0., e[1])
p = Path(verts, codes)
feature_patches.append(
patches.PathPatch(
p,
**exon.properties
)
)
elif feature.type in other_shapes:
part = feature.location
strand = part.strand
if strand in {None, 0, 1}:
strand = 1
start = part.start
else:
strand = -1
start = part.end
distance = (part.end - part.start) * strand
verts, codes = other_shapes[feature.type](start, 0., distance)
p = Path(verts, codes)
feature_patches.append(
patches.PathPatch(
p,
**other_shapes[feature.type].properties
)
)
return feature_patches, text_patches
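# Example usage (an illustrative sketch; `record` is assumed to be a Biopython-style
# SeqRecord whose features expose .type, .id, .strand and .location.parts, and `plt`
# is matplotlib.pyplot):
#
#   fig, ax = plt.subplots()
#   feature_patches, text_patches = draw_region(record)
#   for patch in feature_patches:
#       ax.add_patch(patch)
#   for text_kwargs in text_patches:
#       ax.text(y=1.1, **text_kwargs)  # each entry carries at least 'x' and 's'
#   ax.autoscale_view()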
| mit |
Fireblend/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number of
rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/svm/plot_svm_scale_c.py | 6 | 5404 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size, n_iter=250,
random_state=1))
grid.fit(X, y)
scores = grid.results_['test_mean_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
ritviksahajpal/LUH2 | doc/sphinxext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit |
wlamond/scikit-learn | sklearn/neighbors/__init__.py | 71 | 1025 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
| bsd-3-clause |
ElcoLuijendijk/pyGISlib | classify_raster.py | 1 | 2111 | """
read a raster file and a classification table and reclassify the raster
reclassification works by assigning a new value to each cell of the existing raster
based on the closest value in the classification table
to use, simply change the filename variable below to point to your input raster
and adjust the raster classification values in the file data/raster_classes.csv
Elco Luijendijk, 14 march 2016
"""
import numpy as np
import pandas as pd
try:
import pyGISlib
except ImportError:
import lib.pyGISlib as pyGISlib
maxDepth = 0.0
Nbands = 3
noDataValue = -99999
filename = 'examples/example_raster.tif'
# read raster file
print 'loading raster file ', filename
raster, dimensions, origin, cellsize, nodata, projection = \
pyGISlib.read_raster_file(filename)
Nbands, nx, ny = raster.shape
raster_mod = np.ones_like(raster)
df = pd.read_csv('examples/raster_classes.csv')
# values for color legend in figure
raster_values = (df['raster_values_min'] + df['raster_values_max']) / 2.0
Nintervals = len(raster_values)
band_values = np.zeros((Nintervals, Nbands))
for i in range(Nbands):
band_values[:, i] = df['value_band%i' % (i + 1)].values
# determine closeness to band values:
diff_raster = np.zeros((Nbands, nx, ny, Nintervals))
for i in xrange(Nbands):
for j in xrange(Nintervals):
diff_raster[i, :, :, j] = np.abs(raster[i, :, :] - band_values[j, i])
diff_raster_sum = np.zeros((nx, ny, Nintervals))
# sum the differences for each band
for j in xrange(Nintervals):
diff_raster_sum[:, :, j] = np.sum(diff_raster[:, :, :, j], axis=0)
raster_class = np.argmin(diff_raster_sum, axis=2)
raster_class_final = np.zeros(raster_class.shape)
for i in xrange(Nintervals):
ind = np.where(raster_class == i)
raster_class_final[ind] = raster_values[i]
# save raster
outputFilename = 'examples/classified_raster.tif'
print 'saving raster file %s' % outputFilename
pyGISlib.write_raster_file(outputFilename,
raster_class_final,
origin, cellsize,
nodata, crs=projection)
print 'done'
| gpl-3.0 |
rosswhitfield/mantid | qt/python/mantidqt/widgets/waterfallplotfillareadialog/presenter.py | 3 | 3112 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib.collections import PolyCollection
from mantid.plots import datafunctions
from mantidqt.widgets.plotconfigdialog.colorselector import convert_color_to_hex
from mantidqt.widgets.waterfallplotfillareadialog.view import WaterfallPlotFillAreaDialogView
class WaterfallPlotFillAreaDialogPresenter:
def __init__(self, fig, view=None, parent=None):
self.fig = fig
self.ax = fig.get_axes()[0]
if view:
self.view = view
else:
self.view = WaterfallPlotFillAreaDialogView(parent)
self.init_view()
self.view.show()
# Signals
self.view.close_push_button.clicked.connect(self.view.close)
self.view.enable_fill_group_box.clicked.connect(lambda: self.set_fill_enabled())
self.view.use_line_colour_radio_button.clicked.connect(self.line_colour_fill)
self.view.use_solid_colour_radio_button.clicked.connect(self.solid_colour_fill)
self.view.colour_selector_widget.line_edit.textChanged.connect(self.solid_colour_fill)
def init_view(self):
# This function sets the correct values in the menu when it is first opened.
if self.ax.waterfall_has_fill():
self.view.enable_fill_group_box.setChecked(True)
if datafunctions.waterfall_fill_is_line_colour(self.ax):
self.view.use_line_colour_radio_button.setChecked(True)
else:
self.view.use_solid_colour_radio_button.setChecked(True)
poly = next(poly_collection for poly_collection in self.ax.collections
if isinstance(poly_collection, PolyCollection))
self.view.colour_selector_widget.set_color(convert_color_to_hex(poly.get_facecolor().tolist()[0]))
def set_fill_enabled(self):
if self.view.enable_fill_group_box.isChecked():
if self.view.use_line_colour_radio_button.isChecked():
self.line_colour_fill()
else:
self.solid_colour_fill()
else:
self.remove_fill()
def line_colour_fill(self):
datafunctions.line_colour_fill(self.ax)
def solid_colour_fill(self):
# If the colour selector has been changed then presumably the user wants to set a custom fill colour
# so that option is checked if it wasn't already.
if not self.view.use_solid_colour_radio_button.isChecked():
self.view.use_solid_colour_radio_button.setChecked(True)
colour = self.view.colour_selector_widget.get_color()
datafunctions.solid_colour_fill(self.ax, colour)
def create_fill(self):
self.ax.set_waterfall_fill(True)
def remove_fill(self):
self.ax.set_waterfall_fill(False)
| gpl-3.0 |
analysiscenter/dataset | batchflow/models/sklearn.py | 1 | 3097 | """ Contains models for sci-kit learn estimators """
try:
    from sklearn.externals import joblib as pickle
except ImportError:
pass
try:
import dill as pickle
except ImportError:
pass
from .base import BaseModel
class SklearnModel(BaseModel):
""" Base class for scikit-learn models
Attributes
----------
estimator
an instance of scikit-learn estimator
Notes
-----
**Configuration**
estimator - an instance of scikit-learn estimator
load / path - a path to a pickled estimator
Examples
--------
.. code-block:: python
pipeline
.init_model('static', SklearnModel, 'my_model',
config={'estimator': sklearn.linear_model.SGDClassifier(loss='huber')})
pipeline
.init_model('static', SklearnModel, 'my_model',
config={'load/path': '/path/to/estimator.pickle'})
"""
def __init__(self, *args, **kwargs):
self.estimator = None
super().__init__(*args, **kwargs)
def build(self, *args, **kwargs):
""" Define the model """
_ = args, kwargs
self.estimator = self.config.get('estimator')
def reset(self):
""" Reset the trained model to allow a new training from scratch """
self.build()
def load(self, path):
""" Load the model.
Parameters
----------
path : str
a full path to a file from which a model will be loaded
"""
self.estimator = pickle.load(path)
def save(self, path):
""" Save the model.
Parameters
----------
path : str
a full path to a file where a model will be saved to
"""
if self.estimator is not None:
pickle.dump(self.estimator, path)
else:
raise ValueError("Scikit-learn estimator does not exist. Check your config for 'estimator'.")
def train(self, X, y, *args, **kwargs):
""" Train the model with the data provided
Parameters
----------
X : array-like
Subset of the training data, shape (n_samples, n_features)
y : numpy array
Subset of the target values, shape (n_samples,)
Notes
-----
For more details and other parameters look at the documentation for the estimator used.
"""
if hasattr(self.estimator, 'partial_fit'):
self.estimator.partial_fit(X, y, *args, **kwargs)
else:
self.estimator.fit(X, y, *args, **kwargs)
def predict(self, X, *args, **kwargs):
""" Predict with the data provided
Parameters
----------
X : array-like
Subset of the training data, shape (n_samples, n_features)
Notes
-----
For more details and other parameters look at the documentation for the estimator used.
Returns
-------
array
Predicted value per sample, shape (n_samples,)
"""
return self.estimator.predict(X, *args, **kwargs)
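# Incremental training sketch (illustrative; the direct constructor call and the
# `batches` iterable below are assumptions rather than documented API). Because
# `train` prefers `partial_fit` when the wrapped estimator provides it, repeated
# calls keep updating the same model:
#
#   from sklearn.linear_model import SGDClassifier
#   model = SklearnModel(config={'estimator': SGDClassifier()})
#   for X_batch, y_batch in batches:
#       model.train(X_batch, y_batch, classes=[0, 1])  # kwargs pass through to partial_fit
#   predictions = model.predict(X_new)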
| apache-2.0 |
mbinkowski/opt-mmd | gan/model_mmd_gan.py | 1 | 3138 | from __future__ import division, print_function
from glob import glob
import os
import time
import numpy as np
import scipy.misc
from six.moves import xrange
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
import lmdb
import io
import sys
from IPython.display import display
from model_mmd2 import MMD_GAN, tf, np
import mmd as MMD
import load
from ops import batch_norm, conv2d, deconv2d, linear, lrelu
from utils import save_images, unpickle, read_and_scale, center_and_scale, variable_summaries, conv_sizes, pp
import pprint
class MMDCE_GAN(MMD_GAN):
def set_loss(self, G, images):
with tf.variable_scope("discriminator") as scope:
G1 = linear(G, 1, 'd_htop_lin')
scope.reuse_variables()
images1 = linear(images, 1, 'd_htop_lin')
        # no need to output sigmoids, loss function below takes logits
self.gan_ce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=G1, labels=tf.zeros_like(G1))) # fake
self.gan_ce_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=images1, labels=tf.ones_like(images1))) #real
super(MMDCE_GAN, self).set_loss(G, images)
self.optim_name = 'kernel+cross_entropy_loss'
def add_gradient_penalty(self, kernel, fake_data, real_data):
alpha = tf.random_uniform(shape=[self.batch_size, 1])
if 'mid' in self.config.suffix:
alpha = .4 + .2 * alpha
elif 'edges' in self.config.suffix:
qq = tf.cast(tf.reshape(tf.multinomial([[.5, .5]], self.batch_size),
[self.batch_size, 1]), tf.float32)
alpha = .1 * alpha * qq + (1. - .1 * alpha) * (1. - qq)
elif 'edge' in self.config.suffix:
alpha = .99 + .01 * alpha
x_hat = (1. - alpha) * real_data + alpha * fake_data
Ekx = lambda yy: tf.reduce_mean(kernel(x_hat, yy, K_XY_only=True), axis=1)
witness = Ekx(real_data) - Ekx(fake_data)
gradients = tf.gradients(witness, [x_hat])[0]
penalty = tf.reduce_mean(tf.square(tf.norm(gradients, axis=1) - 1.0))
print('adding gradient penalty')
# We need to:
# - minimize MMD wrt generator
# - maximize MMD wrt discriminator
# - minimize GAN cross-entropy wrt discriminator
if self.config.gradient_penalty > 0:
self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
initializer=self.config.gradient_penalty)
self.g_loss = self.mmd_loss
self.d_loss = -self.mmd_loss + penalty * self.gp + self.gan_ce_loss
self.optim_name += ' gp %.1f' % self.config.gradient_penalty
else:
self.g_loss = self.mmd_loss
self.d_loss = -self.mmd_loss + self.gan_ce_loss
variable_summaries([(gradients, 'dx_gradients')])
tf.summary.scalar(self.optim_name + ' G', self.g_loss)
tf.summary.scalar(self.optim_name + ' D', self.d_loss)
tf.summary.scalar('dx_penalty', penalty)
| bsd-3-clause |
tsurumeso/pysparcl | pysparcl/subfunc.py | 1 | 2830 | import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.cluster import KMeans
from pysparcl import utils
def _get_uw(ds, wbound, niter, uorth=None):
n, p = ds.shape
u = np.random.randn(p)
w = (np.ones(p) / p) * wbound
w_old = np.random.standard_normal(p)
iter = 0
if uorth is not None:
if np.sum(np.abs(uorth - uorth.T)) > 1e-10:
return None
uorth = squareform(uorth)
uorth /= np.sqrt(np.sum(np.square(uorth)))
while (iter < niter and
np.sum(np.abs(w_old - w) / np.sum(np.abs(w_old))) > 1e-4):
if iter == 0:
u = ds.dot(w.T)
else:
u = ds[:, argw >= lam].dot(w[argw >= lam].T)
if uorth is not None:
u -= uorth.dot(uorth.T.dot(u))
iter += 1
u = u / np.linalg.norm(u)
w_old = w.copy()
argw = np.maximum(u.dot(ds), 0).T
lam = utils._binary_search(argw, wbound)
w = utils._soft_thresholding(argw, lam)
w /= np.linalg.norm(w)
u = ds[:, argw >= lam].dot(w[argw >= lam].T) / np.sum(w)
if uorth is not None:
u -= uorth.dot(uorth.T.dot(u))
u /= np.linalg.norm(u)
w /= np.linalg.norm(w)
crit = np.sum(u * (ds.dot(w.T)))
u /= np.sqrt(2.)
return u, w, crit
def _get_wcss(x, cs, ws=None):
wcss_perf = np.zeros(x.shape[1])
for i in np.unique(cs):
mask = (cs == i)
if np.sum(mask) > 1:
wcss_perf += np.sum(
np.square(x[mask, :] - np.mean(x[mask, :], axis=0)), axis=0)
bcss_perf = np.sum(np.square(x - np.mean(x, axis=0)), axis=0) - wcss_perf
return wcss_perf, bcss_perf
def _update_cs(x, k, ws, cs):
x = x[:, ws != 0]
z = x * np.sqrt(ws[ws != 0])
nrowz = z.shape[0]
mus = None
if cs is not None:
for i in np.unique(cs):
if np.sum(cs == i) > 1:
mus = utils._rbind(mus, np.mean(z[cs == i, :], axis=0))
if np.sum(cs == i) == 1:
mus = utils._rbind(mus, z[cs == i, :])
if mus is None:
km = KMeans(k, init='random', n_init=10).fit(z)
else:
distmat = squareform(pdist(utils._rbind(z, mus)))
distmat = distmat[:nrowz, (nrowz + 1):(nrowz + k)]
nearest = distmat.argmin(axis=1)
if len(np.unique(nearest)) == k:
km = KMeans(k, init=mus, n_init=1).fit(z)
else:
km = KMeans(k, init='random', n_init=10).fit(z)
return km.labels_
def _update_ws(x, cs, wbound):
wcss_perf = _get_wcss(x, cs)[0]
tss_perf = _get_wcss(x, np.ones(x.shape[0]))[0]
lam = utils._binary_search(-wcss_perf + tss_perf, wbound)
ws_unscaled = utils._soft_thresholding(-wcss_perf + tss_perf, lam)
return ws_unscaled / np.linalg.norm(ws_unscaled)
| gpl-2.0 |
ovgarol/chaPulin9.0 | simPulsar/galParameters.py | 1 | 5836 | #!/usr/bin/env python
""" Calcula y grafica los parametros para un modelo basado
en las estadisticas de ATNF.
"""
import numpy as np
#import scipy as sp
#import time as tm
import matplotlib.pyplot as plt
from scipy import optimize, array, stats
from matplotlib import rc, rcParams
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern']})
###########################################################################################
# MAIN
###########################################################################################
if __name__ == "__main__":
###########################################################################################
# Getting data from file
###########################################################################################
metal = [[-1.,-0.5,0.0,0.5,1.0],\
[7128,7179,7194,6778,6660],\
[689,588,639,726,611],\
[33.18,33.86,33.82,31.32,30.36],\
[6.33,6.41,6.79,6.38,6.26]]
f = open('sim_parametros.gal', 'r') # open file to read
index = []
Np = []
#C0 = []
#C1 = []
Nc = []
M = []
D = []
# index C0 C1 Np
for line in f: # iterate over the lines in the file
columns = line.split(' ') # split the line into a list of column values
columns = [col.strip() for col in columns] # clean any whitespace off the items
index.append(float(columns[0]))
#C0.append(np.log(float(columns[1])))
#C1.append(np.log(float(columns[2])))
Np.append((float(columns[3])))
Nc.append((float(columns[4])))
M.append((float(columns[5])))
D.append((float(columns[6])))
f.close()
index = array(index)
Np = array(Np)
#C0 = array(C0)
#C1 = array(C0)
Nc = array(Nc)
M = array(M)
D = array(D)
logNp = np.log10(Np)
logNc = np.log10(Nc)
"""
plt.hist(C0,10,color='k',linewidth=2,histtype='step', label='C0')#, range=rango)
plt.legend(loc='upper left')
plt.xlabel('$\log C0$')
plt.ylabel('$N$')
plt.grid(True)
#plt.show()
plt.savefig('N_logC0_.pdf')
plt.clf()
plt.hist(C1,10,color='k',linewidth=2,histtype='step', label='C1')#, range=rango)
plt.legend(loc='upper left')
plt.xlabel('$\log C1$')
plt.ylabel('$N$')
plt.grid(True)
#plt.show()
plt.savefig('N_logC1_.pdf')
plt.clf()
"""
plt.hist(logNp,10,color='k',linewidth=2,histtype='step')#, range=rango)
#plt.legend(loc='upper left')
plt.xlabel('$\log N_T$ [adim]')
plt.ylabel('$N$')
plt.grid(True)
#plt.show()
plt.savefig('N_logNT_.pdf')
plt.clf()
plt.plot(index,Np,',',color='k',alpha=1.)
#plt.legend(loc='best')
plt.xlabel(r'$\alpha_3$ [adim]')
plt.ylabel('$N_T$ [adim]')
#plt.xlim([-2,2])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('index_NT.pdf')
plt.clf()
plt.hist(logNc,10,color='k',linewidth=2,histtype='step')#, range=rango)
#plt.legend(loc='upper left')
plt.xlabel('$\log N_p$ [adim]')
plt.ylabel('$N$')
plt.grid(True)
#plt.show()
plt.savefig('N_logNp_.pdf')
plt.clf()
""
plt.plot(index,Nc,',',color='k',alpha=1.)
#plt.legend(loc='best')
plt.xlabel(r'$\alpha_3$ [adim]')
plt.ylabel('$N_p$ [adim]')
#plt.xlim([-2,2])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('index_Np.pdf')
plt.clf()
plt.plot(M,Np,',',color='k',alpha=1.)
plt.errorbar(metal[0],metal[1], yerr = metal[2], fmt='o', ls='--',color='k')
#plt.legend(loc='best')
plt.xlabel(r'$\Delta$ [Z] [dex]')
plt.ylabel('$N_T$ [adim]')
plt.xlim([-1.5,1.5])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('M_NT.pdf')
plt.clf()
plt.plot(M,Nc,',',color='k',alpha=1.)
plt.errorbar(metal[0],metal[3], yerr=metal[4],fmt='o',color='k',ls='--')
#plt.legend(loc='best')
plt.xlabel(r'$\Delta$ [Z] [dex]')
plt.ylabel('$N_p$ [adim]')
plt.xlim([-1.5,1.5])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('M_Np.pdf')
plt.clf()
plt.plot(D,Np,',',color='k',alpha=1.)
#plt.legend(loc='best')
plt.xlabel(r'$\Delta t$ [yr]')
plt.ylabel('$N_T$ [adim]')
#plt.xlim([-2,2])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('D_NT.pdf')
plt.clf()
plt.plot(D,Nc,',',color='k',alpha=1.)
#plt.legend(loc='best')
plt.xlabel(r'$\Delta t$ [yr]')
plt.ylabel('$N_p$ [adim]')
#plt.xlim([-2,2])
#plt.ylim([-18,-12])
plt.grid(True)
#plt.show()
plt.savefig('D_Np.pdf')
plt.clf()
    DD = open('sim.gal', 'a') # open file to append
print>>DD, 'N_T - ', Np.mean(), '-', Np.std()
print>>DD, 'N_p - ', Nc.mean(), '-', Nc.std()
DD.close()
#plt.plot(age[0],age[1],'o',color='k',alpha=.25)
plt.legend(loc='best')
plt.xlabel(r'$\Delta Z$ [dex]')
plt.ylabel(r'$N_{T}$ [adim]')
plt.xlim([-1.5,1.5])
plt.grid(True)
#plt.show()
plt.savefig('metal_Nt.jpg')
plt.clf()
#plt.errorbar(index[0],index[1], yerr = index[2], fmt='o', ls='--')
plt.legend(loc='best')
plt.xlabel(r'$\Delta Z$ [dex]')
plt.ylabel(r'$N_{p}$ [adim]')
plt.xlim([-1.5,1.5])
plt.grid(True)
#plt.show()
plt.savefig('metal_Np.jpg')
plt.clf()
dumi = []
dumd = []
for i in range(len(metal[0])):
dumi.append(metal[3][i]/metal[1][i])
dumd.append(metal[4][i]/metal[1][i])
#plt.plot(age[0],age[1],'o',color='k',alpha=.25)
plt.errorbar(metal[0],dumi, yerr = dumd, fmt='o', ls='--',color='k')
plt.legend(loc='best')
plt.xlabel(r'$\Delta Z$ [dex]')
plt.ylabel(r'$N_{p}/N_{T}$ [adim]')
plt.xlim([-1.5,1.5])
plt.grid(True)
#plt.show()
plt.savefig('metal_Np-Nt.jpg')
plt.clf()
del dumi, dumd
| agpl-3.0 |
belltailjp/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56311 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
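# --- Illustrative usage sketch (editorial addition, not part of the original
# module). The helper name and the parameter values below are arbitrary; only
# properties that follow directly from the arguments are asserted.
def _example_make_classification():
    X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                               n_redundant=2, n_classes=3,
                               n_clusters_per_class=1, random_state=0)
    assert X.shape == (100, 20)
    assert y.shape == (100,)
    assert set(np.unique(y)) <= {0, 1, 2}  # labels stay within the 3 classes
    return X, y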
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
    return_indicator : bool, optional (default=False)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
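# --- Illustrative usage sketch (editorial addition, not part of the original
# module). With ``return_indicator=True`` the labels come back as a binary
# indicator matrix, avoiding the deprecated sequence-of-sequences format.
def _example_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=10,
                                          n_classes=4, return_indicator=True,
                                          random_state=0)
    assert X.shape == (50, 10)
    assert Y.shape == (50, 4)  # one indicator column per class
    return X, Y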
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
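# --- Illustrative usage sketch (editorial addition, not part of the original
# module). With ``coef=True`` the ground-truth coefficients are returned and,
# since ``noise=0.0`` and ``bias=0.0`` here, the targets reproduce the linear
# model exactly.
def _example_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=7, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    assert X.shape == (50, 7) and y.shape == (50,) and w.shape == (7,)
    assert np.allclose(y, np.dot(X, w))  # y is exactly the noiseless model
    return X, y, w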
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
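# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Without noise the two half circles are deterministic up to the
# shuffle, and the labels are binary.
def _example_make_moons():
    X, y = make_moons(n_samples=100, noise=None, random_state=0)
    assert X.shape == (100, 2) and y.shape == (100,)
    assert set(np.unique(y)) == {0, 1}
    return X, y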
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
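# --- Illustrative usage sketch (editorial addition, not part of the original
# module). With ``noise=0.0`` the targets match the Friedman #1 formula from
# the docstring exactly, and only the first five features are involved.
def _example_make_friedman1():
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y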
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
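# --- Illustrative usage sketch (editorial addition, not part of the original
# module). The singular spectrum of the generated matrix is bell-shaped, so
# most of the spectral mass sits in the first few singular values.
def _example_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    assert X.shape == (100, 50)
    assert s[:10].sum() > s[10:].sum()  # the spectrum decays quickly
    return s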
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
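# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Each column of the code has exactly ``n_nonzero_coefs`` active
# entries, and the signal factorizes as Y = D X.
def _example_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (10, 5) and D.shape == (10, 8) and X.shape == (8, 5)
    assert np.all((X != 0).sum(axis=0) == 3)
    assert np.allclose(Y, np.dot(D, X))
    return Y, D, X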
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows and columns: we don't want to have asymmetries in the
    # final SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
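# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Whatever ``alpha`` is chosen, the result is symmetric and positive
# definite, since it is built as chol.T * chol with a non-singular factor.
def _example_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.9, random_state=0)
    assert prec.shape == (10, 10)
    assert np.allclose(prec, prec.T)  # symmetric
    assert np.all(linalg.eigvalsh(prec) > 0)  # positive definite
    return prec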
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
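# --- Illustrative usage sketch (editorial addition, not part of the original
# module). Labels are assigned by distance quantile, so when ``n_samples`` is
# divisible by ``n_classes`` the classes come out exactly balanced.
def _example_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2) and y.shape == (90,)
    assert np.all(np.bincount(y) == 30)  # 30 samples in each of the 3 classes
    return X, y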
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
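# --- Illustrative usage sketch (editorial addition, not part of the original
# module). The indicator outputs have one boolean row per bicluster, and every
# data row and column belongs to exactly one bicluster.
def _example_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=4,
                                       shuffle=True, random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (4, 30) and cols.shape == (4, 20)
    assert np.all(rows.sum(axis=0) == 1) and np.all(cols.sum(axis=0) == 1)
    return data, rows, cols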
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
alexeyum/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
lpenguin/pandas-qt | tests/test_DataSearch.py | 4 | 5775 | # -*- coding: utf-8 -*-
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from pandasqt.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from pandasqt.models.DataSearch import DataSearch
class TestDataSearch(object):
@pytest.fixture
def dataFrame(self):
data = [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
@pytest.fixture
def geoDataFrame(self):
data = [
[0, 1, 2, 3, 4, 49.1234, 8.123],
[5, 6, 7, 8, 9, 52.1234, 13.123],
[10, 11, 12, 13, 14, 55.1234, 16.123]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz', 'lat', 'lng']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
def test_init(self, dataFrame):
filterString = 'Foo < 10'
datasearch = DataSearch("Test", filterString)
assert datasearch._filterString == filterString
assert isinstance(datasearch._dataFrame, pandas.DataFrame)
assert datasearch.name == 'Test'
datasearch = DataSearch("Test2")
assert datasearch._filterString == ''
assert isinstance(datasearch._dataFrame, pandas.DataFrame)
assert datasearch.name == 'Test2'
datasearch = DataSearch("Test3", dataFrame=dataFrame)
assert datasearch._filterString == ''
assert isinstance(datasearch._dataFrame, pandas.DataFrame)
assert datasearch.name == 'Test3'
assert len(datasearch._dataFrame.index) == 3
def test_repr(self, dataFrame):
datasearch = DataSearch("Test2")
assert str(datasearch).startswith('DataSearch(')
assert str(datasearch).endswith('Test2 ()')
def test_dataFrame(self, dataFrame):
datasearch = DataSearch("Test")
assert datasearch.dataFrame().empty
assert isinstance(datasearch.dataFrame(), pandas.DataFrame)
datasearch = DataSearch("Test", dataFrame=dataFrame)
assert len(datasearch.dataFrame()) == 3
def test_filterString(self):
datasearch = DataSearch("Test")
assert datasearch.filterString() == ''
datasearch = DataSearch('Test2', filterString='Hello World')
assert datasearch.filterString() == 'Hello World'
def test_setFilterString(self):
datasearch = DataSearch("Test")
filterString = 'foo bar'
datasearch.setFilterString(filterString)
assert datasearch.filterString() == filterString
filterString = ' foo bar '
datasearch.setFilterString(filterString)
assert datasearch.filterString() != filterString
assert datasearch.filterString() == filterString.strip()
def test_search(self, dataFrame):
datasearch = DataSearch('Test', dataFrame=dataFrame)
filterString = 'Foo < 10'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 2
filterString = 'Foo < 10 and Bar'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert not valid
filterString = '(Foo < 10) & (Bar > 1)'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 1
filterString = '(Monty < 10) & (Bar > 1)'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert not valid
def test_freeSearch(self, dataFrame):
datasearch = DataSearch('Test', dataFrame=dataFrame)
filterString = 'freeSearch("0")'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 2
filterString = 'freeSearch(1)'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert not valid
filterString = 'freeSearch("12")'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 1
def test_extentSearch(self, geoDataFrame, dataFrame):
datasearch = DataSearch('Test', dataFrame=geoDataFrame)
filterString = 'extentSearch(51, 9, 55, 14)'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 1
datasearch = DataSearch('Test', dataFrame=dataFrame)
filterString = 'extentSearch(51, 9, 55, 14)'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 0
def test_indexSearch(self, dataFrame):
datasearch = DataSearch('Test', dataFrame=dataFrame)
filterString = 'indexSearch([0])'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 1
filterString = 'indexSearch([0, 2])'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 2
filterString = 'indexSearch([0, 1, 2])'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 3
filterString = 'indexSearch([99])'
datasearch.setFilterString(filterString)
ret, valid = datasearch.search()
assert valid
assert sum(ret) == 0
if __name__ == '__main__':
pytest.main() | mit |
scls19fr/blaze | blaze/compute/tests/test_core_compute.py | 8 | 4256 | from __future__ import absolute_import, division, print_function
import pytest
import operator
from datashape import discover, dshape
from blaze.compute.core import (compute_up, compute, bottom_up_until_type_break,
top_then_bottom_then_top_again_etc,
swap_resources_into_scope)
from blaze.expr import by, symbol, Expr, Symbol
from blaze.dispatch import dispatch
from blaze.compatibility import raises, reduce
from blaze.utils import example
import pandas as pd
import numpy as np
def test_errors():
t = symbol('t', 'var * {foo: int}')
with raises(NotImplementedError):
compute_up(by(t, count=t.count()), 1)
def test_optimize():
class Foo(object):
pass
s = symbol('s', '5 * {x: int, y: int}')
@dispatch(Expr, Foo)
def compute_down(expr, foo):
return str(expr)
assert compute(s.x * 2, Foo()) == "s.x * 2"
@dispatch(Expr, Foo)
def optimize(expr, foo):
return expr + 1
assert compute(s.x * 2, Foo()) == "(s.x * 2) + 1"
def test_bottom_up_until_type_break():
s = symbol('s', 'var * {name: string, amount: int}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = (s.amount + 1).distinct()
expr, scope = bottom_up_until_type_break(e, {s: data})
amount = symbol('amount', 'var * int64', token=1)
assert expr.isidentical(amount)
assert len(scope) == 1
assert amount in scope
assert (scope[amount] == np.array([101, 201, 301], dtype='i4')).all()
# This computation has a type change midstream, so we stop and get the
# unfinished computation.
e = s.amount.sum() + 1
expr, scope = bottom_up_until_type_break(e, {s: data})
amount_sum = symbol('amount_sum', 'int64')
assert expr.isidentical(amount_sum + 1)
assert len(scope) == 1
assert amount_sum in scope
assert scope[amount_sum] == 600
# ensure that we work on binops with one child
x = symbol('x', 'real')
expr, scope = bottom_up_until_type_break(x + x, {x: 1})
assert len(scope) == 1
x2 = list(scope.keys())[0]
assert isinstance(x2, Symbol)
assert isinstance(expr, Symbol)
assert scope[x2] == 2
def test_top_then_bottom_then_top_again_etc():
s = symbol('s', 'var * {name: string, amount: int32}')
data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
dtype=[('name', 'S7'), ('amount', 'i4')])
e = s.amount.sum() + 1
assert top_then_bottom_then_top_again_etc(e, {s: data}) == 601
def test_swap_resources_into_scope():
from blaze import Data
t = Data([1, 2, 3], dshape='3 * int', name='t')
expr, scope = swap_resources_into_scope(t.head(2), {t: t.data})
assert t._resources()
assert not expr._resources()
assert t not in scope
def test_compute_up_on_dict():
d = {'a': [1, 2, 3], 'b': [4, 5, 6]}
assert str(discover(d)) == str(dshape('{a: 3 * int64, b: 3 * int64}'))
s = symbol('s', discover(d))
assert compute(s.a, {s: d}) == [1, 2, 3]
def test_pre_compute_on_multiple_datasets_is_selective():
from odo import CSV
from blaze import Data
from blaze.cached import CachedDataset
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
iris = CSV(example('iris.csv'))
dset = CachedDataset({'df': df, 'iris': iris})
d = Data(dset)
assert str(compute(d.df.amount)) == str(df.amount)
def test_raises_on_valid_expression_but_no_implementation():
class MyExpr(Expr):
__slots__ = '_hash', '_child'
@property
def dshape(self):
return self._child.dshape
t = symbol('t', 'var * {amount: real}')
expr = MyExpr(t.amount)
df = [(1.0,), (2.0,), (3.0,)]
with pytest.raises(NotImplementedError):
compute(expr, df)
@pytest.mark.parametrize('n', range(2, 11))
def test_simple_add(n):
x = symbol('x', 'int')
expr = reduce(operator.add, [x] * n)
assert compute(expr, 1) == n
| bsd-3-clause |
nipe0324/kaggle-keypoints-detection-keras | load_data.py | 1 | 1888 | import os
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from utils import reshape2d_by_image_dim_ordering
from keras import backend as K
FTRAIN = 'data/training.csv'
FTEST = 'data/test.csv'
def load(test=False, cols=None):
"""testがTrueの場合はFTESTからデータを読み込み、Falseの場合はFTRAINから読み込みます。
colsにリストが渡された場合にはそのカラムに関するデータのみ返します。
"""
fname = FTEST if test else FTRAIN
    df = read_csv(os.path.expanduser(fname))  # load into a pandas DataFrame
    # convert the space-separated pixel values into a numpy array
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
    if cols:  # extract only the data for the requested columns
df = df[list(cols) + ['Image']]
    print(df.count())  # print the number of non-missing values per column
    df = df.dropna()  # drop rows with missing data
    X = np.vstack(df['Image'].values) / 255.  # scale pixel values to [0, 1]
X = X.astype(np.float32)
    if not test:  # labels exist only in FTRAIN
y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale target coordinates to [-1, 1]
        X, y = shuffle(X, y, random_state=42)  # shuffle the data
y = y.astype(np.float32)
else:
y = None
return X, y
def load2d(test=False, cols=None):
X, y = load(test, cols)
    # reshape into 2D images according to the image_dim_ordering setting
X, _ = reshape2d_by_image_dim_ordering(X)
return X, y
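# Minimal usage sketch. The keypoint column names below are assumptions taken
# from the Kaggle facial keypoints training.csv and are not verified here:
#
#   X, y = load2d()  # full training set, reshaped for the current backend
#   X_eye, y_eye = load(cols=('left_eye_center_x', 'left_eye_center_y'))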
if __name__ == '__main__':
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
| apache-2.0 |
nomadcube/scikit-learn | sklearn/tree/tests/test_tree.py | 72 | 47440 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
    # Test that a proper exception is raised on invalid input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output 'balanced' which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error message for too-large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples == n_features to ease the simultaneous construction of
    # the csr and csc matrices
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
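# For reference, the RBF kernel behind the intuition above is
# K(x, x') = exp(-gamma * ||x - x'||^2), so a larger ``gamma`` shrinks the
# radius of influence of each support vector. A quick, hypothetical way to see
# this effect (using the 2-D data defined further below) would be:
#
#   clf = SVC(kernel='rbf', C=1.0, gamma=10.0).fit(X_2d, y_2d)
#   print(clf.n_support_)  # large gamma typically selects many support vectors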
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
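# Sketch of what MidpointNormalize does: vmin and midpoint below match the
# heatmap call further down, while vmax=1.0 is assumed here for illustration.
# The midpoint maps to the middle of the colormap and values are interpolated
# linearly on either side:
#
#   norm = MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92)
#   norm(0.92)  # -> 0.5
#   norm(0.96)  # -> 0.75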
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
liutairan/pyGCS | dev/GCS.py | 1 | 15660 | #!/usr/bin/pythonw
# -*- coding: UTF-8 -*-
'''
MIT License
Copyright (c) 2017 Tairan Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
from os import walk
import sys
from sys import stdout
import wx
import wx.lib.embeddedimage
import wx.dataview
import logging
import threading
from threading import Thread
from wx.lib.pubsub import pub
import serial
import serial.tools.list_ports
from pyzbMultiwii import MultiWii
from SerialCom import SerialCommunication
from DataExchange import DataExchange
from TabOne import TabOne
from TabTwo import TabTwo
from TabThree import TabThree
from TabFour import TabFour
import math
import time
import struct
import numpy
import matplotlib
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from PIL import Image
from Map import Map
#from GSMPy import GSMPy
import signal
from contextlib import contextmanager
__author__ = "Tairan Liu"
__copyright__ = "Copyright 2017, Tairan Liu"
__credits__ = ["Tairan Liu", "Other Supporters"]
__license__ = "MIT"
__version__ = "0.4-dev"
__maintainer__ = "Tairan Liu"
__email__ = "[email protected]"
__status__ = "Development"
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.setitimer(signal.ITIMER_REAL, seconds)
try:
yield
finally:
signal.alarm(0)
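# Minimal usage sketch for the time_limit helper above. The wrapped call is a
# hypothetical placeholder, not a function defined in this module; SIGALRM-based
# timeouts only work on Unix-like systems and only in the main thread.
#
#   try:
#       with time_limit(2):
#           some_slow_serial_operation()  # placeholder name (assumption)
#   except TimeoutException:
#       print('operation timed out')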
class MainFrame(wx.Frame):
def __init__(self, *args, **kw):
super(MainFrame, self).__init__(*args, **kw)
# Data exchange handle
self.dataExchangeHandle = DataExchange()
# Mouse states
self.inWindowFlag = 0
self.inMapFlag = 0
self.leftDown = 0
self.rightDown = 0
self.shiftDown = 0
self.currentTab = 0
# Map Info
self._width = 640
self._height = 640
self._originLat = 30.408158 #37.7913838
self._originLon = -91.179533 #-79.44398934
self._zoom = 19
self._maptype = 'hybrid' #'roadmap'
self._homeLat = self._originLat
self._homeLon = self._originLon
self._dX = 0
self._dY = 0
self.waypoints = []
self.InitUI()
def InitUI(self):
self.pnl = wx.Panel(self)
self.SetSize((1150,670))
self.SetTitle("GCS")
self.SetClientSize((1150,670))
self.Center()
# Events
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_IDLE,self.OnIdle)
self.pnl.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.pnl.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.pnl.SetFocus()
#pnl.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
#pnl.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
#self.Bind(wx.EVT_CONTEXT_MENU, self.OnContext)
# Panel Elements
# Create Empty Image to preload
self.mapHandle = Map(self._originLat, self._originLon, self._zoom, self._width, self._height)
self.mapImage = self.mapHandle.retImage
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY, wx.BitmapFromImage(self.mapImage), pos=(0, 0))
# Bind Mouse Events
self.imageCtrl.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
self.imageCtrl.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
self.imageCtrl.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRightDown)
self.imageCtrl.Bind(wx.EVT_RIGHT_UP, self.OnMouseRightUp)
self.imageCtrl.Bind(wx.EVT_MOTION, self.OnMotion)
self.imageCtrl.Bind(wx.EVT_MOUSEWHEEL, self.OnScroll)
self.imageCtrl.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterMap)
self.imageCtrl.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveMap)
self.pnl.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
self.pnl.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
self.pnl.Bind(wx.EVT_RIGHT_DOWN, self.OnMouseRightDown)
self.pnl.Bind(wx.EVT_RIGHT_UP, self.OnMouseRightUp)
self.pnl.Bind(wx.EVT_MOTION, self.OnMotion)
#self.Bind(wx.EVT_MOUSEWHEEL, self.OnScroll)
self.pnl.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
self.pnl.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.imageCtrl.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.imageCtrl.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
# Tabs
self.nb = wx.Notebook(self.pnl, pos = (642,0), size = (510, 635))
self.tab1 = TabOne(self.nb, self.dataExchangeHandle)
self.tab2 = TabTwo(self.nb, self.dataExchangeHandle)
self.tab3 = TabThree(self.nb, self.dataExchangeHandle)
self.tab4 = TabFour(self.nb, self.dataExchangeHandle)
# Add the windows to tabs and name them.
self.nb.AddPage(self.tab1, "Overview")
self.nb.AddPage(self.tab2, "Quad 1")
self.nb.AddPage(self.tab3, "Quad 2")
self.nb.AddPage(self.tab4, "Quad 3")
self.nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
self.nb.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.nb.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.tab1.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.tab1.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.tab2.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.tab2.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.tab3.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.tab3.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.tab4.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.tab4.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
#
#self.imageCtrl.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
#self.imageCtrl.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
# Button Events
#buttonStaticBox = wx.StaticBox(pnl, -1, 'Buttons', pos = (645,0), size = (300,240))
#flightStaticBox = wx.StaticBox(pnl, -1, 'Flight Data', pos = (645,245), size = (300,240))
# Buttons
self.incZoomButton = wx.Button(self.pnl, -1, '+', pos = (2, 642), size = (25,20))
self.Bind(wx.EVT_BUTTON, self.OnIncZoom, self.incZoomButton)
self.decZoomButton = wx.Button(self.pnl, -1, '-', pos = (27, 642), size = (25,20))
self.Bind(wx.EVT_BUTTON, self.OnDecZoom, self.decZoomButton)
self.autoZoomButton = wx.Button(self.pnl, -1, 'Auto Zoom', pos = (55, 642), size = (85,20))
self.Bind(wx.EVT_BUTTON, self.OnAutoZoom, self.autoZoomButton)
self.retHomeButton = wx.Button(self.pnl, -1, 'Return Home', pos = (143, 642), size = (95,20))
self.Bind(wx.EVT_BUTTON, self.OnReturnHome, self.retHomeButton)
self.zoomlevelLabel = wx.StaticText(self, -1, 'Zoom Level: '+str(self._zoom), pos = (245,647), size = (40,20))
# Show
self.Show(True)
def OnQuitApp(self, event):
self.Close()
def OnPageChanged(self, event):
self.currentTab = event.GetSelection()
self.dataExchangeHandle.serialMode = self.currentTab
event.Skip()
def OnPageChanging(self, event):
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.nb.GetSelection()
#print 'OnPageChanging, old:%d, new:%d, sel:%d\n' % (old, new, sel)
event.Skip()
def OnKeyDown(self, event):
keyNumber = event.GetKeyCode()
print(keyNumber)
if keyNumber == wx.WXK_SHIFT:
self.shiftDown = 1
def OnKeyUp(self, event):
keyNumber = event.GetKeyCode()
if keyNumber == 27:
self.Close()
elif keyNumber == wx.WXK_SHIFT:
self.shiftDown = 0
def OnIncZoom(self, event):
self._zoom = self._zoom + 1
self.zoomlevelLabel.SetLabel('Zoom Level: '+str(self._zoom))
self.mapHandle.zoom(1)
self.Refresh()
def OnDecZoom(self, event):
self._zoom = self._zoom - 1
self.zoomlevelLabel.SetLabel('Zoom Level: '+str(self._zoom))
self.mapHandle.zoom(-1)
self.Refresh()
def OnAutoZoom(self, event):
# auto select the zoom level so that all the waypoints are shown in the frame
# if no gps coordinates or waypoints, then do nothing
existGPSs = self.dataExchangeHandle._currentGPS
existWPs = self.dataExchangeHandle._waypointLists
self._auto_zoom_and_center(existGPSs, existWPs)
self.Refresh()
def _auto_zoom_and_center(self, gps, wps):
tempGPSList = []
latList = []
lonList = []
for dev in range(3):
if len(gps[dev]) > 0:
tempGPSList.append(gps[dev])
latList.append(gps[dev][0])
lonList.append(gps[dev][1])
tempList = wps[dev]
if len(tempList) > 0:
for i in range(len(tempList)):
tempWP = tempList[i]
tempGPSList.append([tempWP['lat'], tempWP['lon']])
latList.append(tempWP['lat'])
lonList.append(tempWP['lon'])
if len(tempGPSList) >= 2:
max_lat = max(latList)
min_lat = min(latList)
max_lon = max(lonList)
min_lon = min(lonList)
[_center_lat, _center_lon, _zoomlevel] = self.mapHandle._find_zoomlevel(min_lat, max_lat, min_lon, max_lon)
self._zoom = _zoomlevel
self.zoomlevelLabel.SetLabel('Zoom Level: '+str(self._zoom))
self.mapHandle._reload(_center_lat, _center_lon, _zoomlevel)
self.Refresh()
def OnReturnHome(self, event):
self.mapHandle.return_origin()
self.Refresh()
def OnSize(self, event):
event.Skip()
self.Refresh()
def OnPaint(self, event):
self.mapImage = self.mapHandle.retImage
tempImage = wx.BitmapFromImage(self.mapImage)
self.dc = wx.MemoryDC(tempImage)
self.dc.SetPen(wx.Pen("BLACK", style = wx.TRANSPARENT))
try:
for dev in range(3):
self.dc.SetPen(wx.Pen("BLACK", style = wx.TRANSPARENT))
tempList = self.dataExchangeHandle._waypointLists[dev]
if len(tempList) > 0:
for i in range(len(tempList)):
tempWP = tempList[i]
x,y = self.mapHandle.GPStoImagePos(tempWP['lat'], tempWP['lon'])
if i == 0:
self.dc.SetBrush(wx.Brush("BLUE", wx.SOLID))
self.dc.DrawCircle(x, y, 7)
self.dc.DrawText(str(dev+1), x, y)
else:
self.dc.SetBrush(wx.Brush("RED", wx.SOLID))
self.dc.DrawCircle(x, y, 7)
for i in range(len(tempList)):
if i < len(tempList)-1:
tempWP = tempList[i]
x,y = self.mapHandle.GPStoImagePos(tempWP['lat'], tempWP['lon'])
tempWP_next = tempList[i+1]
x_n,y_n = self.mapHandle.GPStoImagePos(tempWP_next['lat'], tempWP_next['lon'])
self.dc.SetPen(wx.Pen(wx.Colour(dev*60,255,255-dev*60), 1))
self.dc.DrawLines(((x, y),(x_n, y_n)))
else:
tempWP = tempList[i]
x,y = self.mapHandle.GPStoImagePos(tempWP['lat'], tempWP['lon'])
tempWP_next = tempList[0]
x_n,y_n = self.mapHandle.GPStoImagePos(tempWP_next['lat'], tempWP_next['lon'])
self.dc.SetPen(wx.Pen(wx.RED, 1))
self.dc.DrawLines(((x,y),(x_n, y_n)))
else:
pass
except:
pass
# draw current GPS if available
try:
for dev in range(3):
self.dc.SetPen(wx.Pen("BLACK", style = wx.TRANSPARENT))
tempGPSs = self.dataExchangeHandle._currentGPS[dev]
if len(tempGPSs) > 0:
x, y = self.mapHandle.GPStoImagePos(tempGPSs[0], tempGPSs[1])
self.dc.SetBrush(wx.Brush("YELLOW", wx.SOLID))
self.dc.DrawCircle(x, y, 10)
self.dc.DrawText(str(dev+1), x-2, y-4)
else:
pass
except:
pass
self.dc.SelectObject(wx.NullBitmap)
self.imageCtrl.SetBitmap(tempImage)
def OnIdle(self,event):
self.Refresh()
def OnEnterWindow(self, event):
#print('Enter Window')
self.inWindowFlag = 1
def OnEnterMap(self, event):
#print('Enter Map')
self.pnl.SetFocus()
self.inMapFlag = 1
def OnLeaveWindow(self, event):
#print('Leave Window')
self.inWindowFlag = 0
def OnLeaveMap(self, event):
#print('Leave Map')
self.inMapFlag = 0
def OnMouseLeftDown(self, event):
#print('left down')
self.leftDown = 1
self.mouseX, self.mouseY = event.GetPosition()
def OnMouseLeftUp(self, event):
#print('left up')
self.leftDown = 0
def OnMouseRightDown(self, event):
#print('right down')
self.rightDown = 1
self.mouseX, self.mouseY = event.GetPosition()
_point_lat, _point_lon = self.mapHandle.PostoGPS(self.mouseX, self.mouseY)
_point_x, _point_y = self.mapHandle.GPStoImagePos(_point_lat, _point_lon)
#print(self.currentTab)
if self.currentTab == 1:
self.tab2.OnAdd(_point_lat, _point_lon)
if self.currentTab == 2:
self.tab3.OnAdd(_point_lat, _point_lon)
if self.currentTab == 3:
self.tab4.OnAdd(_point_lat, _point_lon)
self.Refresh()
def OnMouseRightUp(self, event):
#print('right up')
self.rightDown = 0
def OnMotion(self, event):
x, y = event.GetPosition()
#print(x,y)
if self.inMapFlag == 1 and self.leftDown == 1:
dx = x-self.mouseX
dy = y-self.mouseY
self.mapHandle.move(dx, dy)
self.Refresh()
def OnScroll(self, event):
dlevel = event.GetWheelRotation()
#self.mapHandle.zoom(dlevel/20)
# +: Down/Left, -: Up/Right
self.Refresh()
def InPointArea(self, x, y):
if self.currentTab == 1:
pass
if self.currentTab == 2:
pass
if self.currentTab == 3:
pass
def main():
map = wx.App()
MainFrame(None)
map.MainLoop()
if __name__ == "__main__":
main()
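# --- Hedged illustration (not part of the original application) -------------
# OnAutoZoom / _auto_zoom_and_center above pick a zoom level so that every
# GPS fix and waypoint fits in the view, but MapHandle._find_zoomlevel itself
# is defined elsewhere. The sketch below shows one plausible way to compute
# such a zoom level, assuming a slippy-map style world that doubles in pixel
# width per zoom level and treating latitude as linear; it is an assumption
# for illustration only, not the application's actual implementation.
import math

def sketch_find_zoomlevel(min_lat, max_lat, min_lon, max_lon,
                          view_px=(640, 640), tile_px=256, max_zoom=19):
    """Return (center_lat, center_lon, zoom) so the lat/lon box fits the view."""
    center_lat = 0.5 * (min_lat + max_lat)
    center_lon = 0.5 * (min_lon + max_lon)
    lat_span = max(max_lat - min_lat, 1e-9)
    lon_span = max(max_lon - min_lon, 1e-9)
    # at zoom z the world is tile_px * 2**z pixels wide (360 degrees of longitude)
    zoom_x = math.log2(view_px[0] * 360.0 / (lon_span * tile_px))
    zoom_y = math.log2(view_px[1] * 180.0 / (lat_span * tile_px))
    zoom = int(max(0, min(max_zoom, math.floor(min(zoom_x, zoom_y)))))
    return center_lat, center_lon, zoom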
| mit |
sharadmv/trees | experiments/benchmark/create_offline_graph.py | 1 | 3888 | from cStringIO import StringIO
from Bio import Phylo
import matplotlib
import matplotlib.pyplot as plt
import random
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import numpy as np
import trees
from trees.ddt import DirichletDiffusionTree, Inverse, GaussianLikelihoodModel
from trees.mcmc import MetropolisHastingsSampler
matplotlib.rcParams.update({'font.size': 48})
from trees.util import plot_tree_2d
import seaborn as sns
sns.set_style('white')
from tqdm import tqdm
from sklearn.decomposition import PCA
import cPickle as pickle
pca = PCA(2)
dataset = trees.data.load('zoo')
X, y = dataset.X, dataset.y
X += np.random.normal(scale=0.01, size=X.shape)
pca.fit(X)
N, D = X.shape
with open('../../scripts/zoo.tree', 'rb') as fp:
master_tree = pickle.load(fp)
master_constraints = list(master_tree.generate_constraints())
random.seed(0)
random.shuffle(master_constraints)
train_constraints, test_constraints = master_constraints[:200], master_constraints[200:]
test_constraints = test_constraints[:10000]
df = Inverse(c=0.9)
lm = GaussianLikelihoodModel(sigma=np.eye(D) / 4.0, sigma0=np.eye(D) / 2.0, mu0=X.mean(axis=0)).compile()
models = {
'No constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=[]),
# '10 constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=train_constraints[:10]),
'50 constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=train_constraints[:50]),
'100 constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=train_constraints[:100]),
# '150 constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=train_constraints[:150]),
'200 constraints': DirichletDiffusionTree(df=df, likelihood_model=lm, constraints=train_constraints),
}
samplers = {
a : MetropolisHastingsSampler(d, X) for a, d in models.iteritems()
}
for sampler in samplers.values():
sampler.initialize_assignments()
score_every = 1000
def iterate(n_iters):
scores = {a: [] for a in samplers}
likelihoods = {a: [] for a in samplers}
for i in tqdm(xrange(n_iters)):
for name, sampler in samplers.items():
sampler.sample()
likelihoods[name].append(sampler.tree.marg_log_likelihood())
if i % score_every == 0:
scores[name].append(float(sampler.tree.score_constraints(test_constraints))
/ len(test_constraints))
for name, sampler in samplers.items():
scores[name].append(float(sampler.tree.score_constraints(test_constraints))
/ len(test_constraints))
return scores, likelihoods
n_iters = 100000
scores, likelihoods = iterate(n_iters)
fontsize = 16
plt.figure()
plt.ylim([0, 1])
plt.xlim([0, n_iters])
plt.xlabel("Iterations", fontsize=fontsize)
plt.ylabel("Constraint Score", fontsize=fontsize)
for name, score in scores.items():
plt.plot(np.arange(0, n_iters + score_every, score_every), score, label=name)
plt.legend(loc='best', fontsize=12)
plt.savefig('offline-scores.png', bbox_inches='tight')
plt.figure()
plt.xlim([0, n_iters])
# plt.ylim(ymin=-400)
plt.xlabel("Iterations", fontsize=fontsize)
plt.ylabel("Data Log Likelihood", fontsize=fontsize)
for name, likelihood in likelihoods.items():
plt.plot(likelihood, label=name)
plt.legend(loc='best', fontsize=12)
plt.savefig('offline-likelihoods.png', bbox_inches='tight')
for type, model in models.items():
final_tree = model.copy()
plt.figure()
plot_tree_2d(final_tree, X, pca)
for node in final_tree.dfs():
if node.is_leaf():
node.point = y[node.point]
newick = final_tree.to_newick()
tree = Phylo.read(StringIO(newick), 'newick')
Phylo.draw_graphviz(tree, prog='neato')
plt.savefig('tree-%s.png' % type, bbox_inches='tight')
plt.show()
| mit |
steinnp/Big-Data-Final | Classification/bayes_most_informative.py | 1 | 3013 | import nltk
import csv
import matplotlib.pyplot as plt
word_features = []
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
def extract_features(document):
document_words = set(document)
features = {}
global word_features
for word in word_features:
features[word] = (word in document_words)
return features
def get_most_informative_features_with_values(clf, n=100):
# Determine the most relevant features, and display them.
cpdist = clf._feature_probdist
to_return = []
print('Most Informative Features')
for (fname, fval) in clf.most_informative_features(n):
def labelprob(l):
return cpdist[l, fname].prob(fval)
labels = sorted([l for l in clf._labels
if fval in cpdist[l, fname].samples()],
key=labelprob)
if len(labels) == 1:
continue
l0 = labels[0]
l1 = labels[-1]
if cpdist[l0, fname].prob(fval) == 0:
ratio = 'INF'
else:
ratio = float((cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval)))
if l0 == 'pos':
ratio = ratio * -1
to_return.append((fname, ratio))
return to_return
def plot_most_important_words(tweets, predicts):
labels = []
new_tweets = []
for i, la in enumerate(predicts):
if la == 0:
new_tweets.append(tweets[i])
labels.append('neg')
if la == 1:
pass
if la == 2:
new_tweets.append(tweets[i])
labels.append('pos')
train = list(zip(new_tweets, labels))
tweets = []
for (words, sentiment) in train:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
global word_features
word_features = get_word_features(get_words_in_tweets(tweets))
training_set = nltk.classify.apply_features(extract_features, tweets)
# training_set = nltk.classify.apply_features(word_features, tweets)
clf = nltk.NaiveBayesClassifier.train(training_set)
mostinf = get_most_informative_features_with_values(clf, 20)
# mostinf = clf.get_most_informative_features_with_values(20)
mostinf = sorted(mostinf, key=lambda x: x[1])
words = [i[0] for i in mostinf]
values = [i[1] for i in mostinf]
x_range = [i for i in range(len(words))]
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(1, 1, 1)
colors = ['red' if v < 0 else 'green' for v in values]
    values = [abs(n) for n in values]  # use magnitudes but keep the pairing with words/colors
ax.barh(x_range, values, align='center', color=colors)
ax.set_yticks(x_range)
ax.set_yticklabels(words)
ax.set_xlabel('Word impact')
plt.title("Most informative features")
#plt.show()
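# --- Hedged usage sketch (not part of the original module) ------------------
# plot_most_important_words expects raw tweet strings plus integer labels in
# which 0 is treated as negative, 2 as positive and 1 is skipped (see the loop
# above).  The toy tweets below are invented purely to show the call shape;
# real use would pass the tweets and classifier predictions from the pipeline.
if __name__ == "__main__":
    toy_tweets = [
        "I love this phone, the battery is great",
        "Terrible service, very slow and rude",
        "Best movie ever, amazing cast and story",
        "Awful update, the app keeps crashing",
    ]
    toy_predicts = [2, 0, 2, 0]  # 2 = positive, 0 = negative, 1 = neutral (ignored)
    plot_most_important_words(toy_tweets, toy_predicts)
    plt.show()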
| mit |
dinossimpson/pyspeckit | pyspeckit/spectrum/models/radex_modelgrid.py | 3 | 4499 | """
Fit a line based on parameters output from a grid of RADEX models
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
from . import fitter,model
import matplotlib.cbook as mpcb
import copy
try:
    import astropy.io.fits as pyfits
except ImportError:
    import pyfits
try:
    import scipy.ndimage
    scipyOK = True
except ImportError:
    scipyOK = False
class radex_model(object):
def __init__(self, xarr,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
modelfunc=None,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
        xoff_v, width are both in km/s. Width is the gaussian 'sigma'
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
A modelfunc must be specified. Model functions should take an xarr and
a series of keyword arguments corresponding to the line parameters
(Tex, tau, xoff_v, and width (gaussian sigma, not FWHM))
"""
        self.modelfunc = modelfunc
        self.temperature_gridnumber = temperature_gridnumber
        self.verbose = verbose
        self.debug = debug
if self.modelfunc is None:
raise ValueError("Must specify a spectral model function. See class help for form.")
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
self.taugrid = [pyfits.getdata(path_to_taugrid)]
self.texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
self.yinds,self.xinds = np.indices(self.taugrid[0].shape[1:])
                self.densityarr = (self.xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
                self.columnarr = (self.yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
self.minfreq = (4.8,)
self.maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
self.minfreq,self.maxfreq,self.texgrid = zip(*texgrid)
self.minfreq,self.maxfreq,self.taugrid = zip(*taugrid)
self.yinds,self.xinds = np.indices(self.taugrid[0].shape[1:])
            self.densityarr = (self.xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            self.columnarr = (self.yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
self.xarr = copy.copy(xarr)
self.xarr.convert_to_unit('Hz', quiet=True)
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if debug:
import pdb; pdb.set_trace()
    def __call__(self, density=4, column=13, xoff_v=0.0, width=1.0, **kwargs):
        self.gridval1 = np.interp(density, self.densityarr[0, :], self.xinds[0, :])
        self.gridval2 = np.interp(column, self.columnarr[:, 0], self.yinds[:, 0])
        if np.isnan(self.gridval1) or np.isnan(self.gridval2):
            raise ValueError("Invalid column/density")
        if scipyOK:
            # interpolate the tau and tex grids at the requested (column, density) point
            tau = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber, :, :],
                                                 np.array([[self.gridval2], [self.gridval1]]),
                                                 order=1)
                   for tg in self.taugrid]
            tex = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber, :, :],
                                                 np.array([[self.gridval2], [self.gridval1]]),
                                                 order=1)
                   for tg in self.texgrid]
        else:
            raise ImportError("Couldn't import scipy, therefore cannot interpolate")
        if self.verbose:
            print("density %20.12g column %20.12g: tau %s tex %s" % (density, column, tau, tex))
        if self.debug:
            import pdb; pdb.set_trace()
        return self.modelfunc(self.xarr, Tex=tex, tau=tau, xoff_v=xoff_v, width=width, **kwargs)
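# --- Hedged usage sketch (not part of the original module) ------------------
# Illustrates the shapes radex_model expects when grids are passed in memory:
# texgrid/taugrid are sequences of (minfreq, maxfreq, grid) tuples and hdr
# supplies the CRPIX/CRVAL/CD keywords used to build the log-density and
# log-column axes.  Every number below, the dict-style header and the trivial
# modelfunc are invented for illustration; xarr must be a pyspeckit
# SpectroscopicAxis in frequency units (see the class docstring), constructed
# however your pyspeckit version expects.
def _example_usage(xarr):
    ntemp, ncol, ndens = 5, 20, 20
    tau_arr = np.random.rand(ntemp, ncol, ndens)               # fake opacity grid
    tex_arr = 5. + 10. * np.random.rand(ntemp, ncol, ndens)    # fake Tex grid [K]
    hdr = {'CRPIX1': 1, 'CRVAL1': 2.0, 'CD1_1': 0.25,          # log(density) axis
           'CRPIX2': 1, 'CRVAL2': 12.0, 'CD2_2': 0.25}         # log(column) axis
    def flat_model(xarr, Tex=None, tau=None, xoff_v=0.0, width=1.0, **kw):
        # stand-in for a real line model; returns a flat spectrum
        return np.zeros(len(xarr))
    model = radex_model(xarr,
                        texgrid=((4.8, 5.0, tex_arr),),
                        taugrid=((4.8, 5.0, tau_arr),),
                        hdr=hdr, modelfunc=flat_model)
    return model(density=4.0, column=13.0, xoff_v=0.0, width=1.0)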
| mit |
r3kall/AnimeRecommenderSystem | animerecommendersystem/evaluation/training_testing_cf.py | 1 | 7694 |
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from animerecommendersystem.utils import definitions
from animerecommendersystem.data_processing.user_cluster_matrix import read_user_item_json
from animerecommendersystem.data_processing.item_cluster_matrix import build_item_feature_matrix
def build_user_item_sparse_matrix(filename):
user_item = read_user_item_json(filename)
item_feature, pos_to_id, id_to_pos = build_item_feature_matrix()
N = len(user_item.keys())
M = item_feature.shape[0]
user_item_sparse = np.zeros((N, M), dtype=np.uint16)
user_mean = np.zeros(N)
user_count = 0
    # user_count corresponds to the i-th key
    # of the iteration over user_item keys
for user in user_item.keys():
sumrank = 0.
relevant = 0.
for item_id, values in user_item[user]['list'].iteritems():
try:
item_index = id_to_pos[int(item_id)]
except KeyError:
continue
rate = values['rate']
user_item_sparse[user_count, item_index] = rate
if rate > 0:
relevant += 1
sumrank += rate
if relevant > 0:
user_mean[user_count] = sumrank / relevant
else:
user_mean[user_count] = 5.5
user_count += 1
return user_item_sparse, user_mean
def k_neighbors(sparse_matrix, k):
# create the k-neighbors unsupervised model
model = NearestNeighbors(n_neighbors=k+1, metric='cosine',
algorithm='brute', n_jobs=1)
model.fit(sparse_matrix) # train the model
# compute k-neighbors, returning the index and distance matrices
distances, indices = model.kneighbors(sparse_matrix,
return_distance=True)
return distances[:, 1:], indices[:, 1:]
def evaluate(user_item_sparse, neighbors, distances, mean_rates):
relevant_counter = 0 # items with rate != 0 and that exist in the neighbors
total_counter = 0 # items with rate != 0
mae_list = []
rmse_list = []
for u in range(user_item_sparse.shape[0]):
for it in range(user_item_sparse.shape[1]):
if user_item_sparse[u, it] != 0:
total_counter += 1
pred_num = 0.
pred_dem = 0.
for n in range(neighbors.shape[1]):
neigh_rate = user_item_sparse[neighbors[u, n], it]
if neigh_rate > 0:
sim = (1. - distances[u, n])
pred_num += sim * (
neigh_rate - mean_rates[neighbors[u, n]]
)
pred_dem += sim
if pred_dem == 0:
# print "\nNo item in neigh"
# prediction = user_item_matrix[user]['mean_rate']
continue
else:
prediction = mean_rates[u] + (pred_num / pred_dem)
# prediction = pred_num / pred_dem
relevant_counter += 1
if prediction < 1.:
prediction = 1.
if prediction > 10.:
prediction = 10.
mae_list.append(np.abs(prediction - float(user_item_sparse[u, it])))
rmse_list.append((prediction - float(user_item_sparse[u, it])) ** 2)
# print "prediction: %f \t true: %d" % (prediction, attributes['rate'])
not_found_ratio = 1. - (float(relevant_counter) / float(total_counter))
return np.array(mae_list), np.array(rmse_list), not_found_ratio
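# --- Hedged worked example (not part of the original module) ----------------
# evaluate() above predicts a rating with the mean-centred, similarity-weighted
# neighbourhood rule
#     pred(u, i) = mean(u) + sum_n sim(u, n) * (r(n, i) - mean(n)) / sum_n sim(u, n)
# taken over the neighbours n that actually rated item i, then clips to the
# 1-10 scale.  The numbers below are made up solely to show the arithmetic on
# a single user/item pair.
def _example_prediction():
    user_mean = 7.0
    neighbour_rates = [8, 6, 0]           # 0 means the neighbour did not rate the item
    neighbour_means = [7.5, 5.0, 6.0]
    similarities = [0.9, 0.4, 0.8]        # 1 - cosine distance, as in evaluate()
    num = 0.0
    den = 0.0
    for rate, mean, sim in zip(neighbour_rates, neighbour_means, similarities):
        if rate > 0:
            num += sim * (rate - mean)
            den += sim
    prediction = (user_mean + num / den) if den > 0 else user_mean
    return min(10., max(1., prediction))  # ~7.65 for these numbers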
def cf_k_fold_rmse(k=5):
"""Perform k-fold cross validation, with k=5"""
# these are the same for all iterations
# item_feature, pos_to_id, id_to_pos = build_item_feature_matrix()
trn_mae_list = []
trn_rmse_list = []
trn_not_found_ratio_list = []
tst_mae_list = []
tst_rmse_list = []
tst_not_found_ratio_list = []
# TODO execute with 5 splits
for i in range(2):
trn_filename = os.path.join(definitions.FILE_DIR,
"user_item_train_"+str(i)+".json")
tst_filename = os.path.join(definitions.FILE_DIR,
"user_item_test_"+str(i)+".json")
trn_user_item_sparse, trn_mean_rates = build_user_item_sparse_matrix(trn_filename)
t0 = time.time()
distances, indices = k_neighbors(trn_user_item_sparse, k)
print "Iteration %d, time to compute neighbors: %f seconds" % (i, time.time() - t0)
"""
trn_mae, trn_rmse, trn_not_found_ratio = evaluate(
trn_user_item_sparse, indices, distances, trn_mean_rates)
del trn_user_item_sparse
del trn_mean_rates
print "First Evaluation done, iteration %d" % i
"""
tst_user_item_sparse, tst_mean_rates = build_user_item_sparse_matrix(tst_filename)
"""
tst_mae, tst_rmse, tst_not_found_ratio = evaluate(
tst_user_item_sparse, indices, distances, tst_mean_rates)
"""
# del tst_user_item_sparse
# del tst_mean_rates
trn_mae_list.append(np.mean([1, 2]))
trn_rmse_list.append(np.sqrt(np.mean([1, 2])))
trn_not_found_ratio_list.append(0.1)
tst_mae_list.append(np.mean([1, 2]))
tst_rmse_list.append(np.sqrt(np.mean([1, 2])))
tst_not_found_ratio_list.append(0.1)
return np.mean(trn_mae_list), np.mean(trn_rmse_list), np.mean(trn_not_found_ratio_list), \
np.mean(tst_mae_list), np.mean(tst_rmse_list), np.mean(tst_not_found_ratio_list)
def compute_evaluation():
train_mae_list = []
train_rmse_list = []
test_mae_list = []
test_rmse_list = []
parameters = [3, 5, 7, 10, 30, 50, 75, 100]
parameters = [1000, 2000, 5000]
min_rmse = 1000.
target_k = 0
for t in parameters:
print "=" * 71
print "K = %d" % t
tr_mae, tr_rmse, tr_ratio, ts_mae, ts_rmse, ts_ratio = cf_k_fold_rmse(k=t)
"""
print "\nTraining Set"
print "Not Found Ratio: %f" % tr_ratio
print "MAE: %s" % str(tr_mae)
print "RMSE: %s" % str(tr_rmse)
"""
print "\nTest Set"
print "Not Found Ratio: %f" % ts_ratio
print "MAE: %s" % str(ts_mae)
print "RMSE: %s" % str(ts_rmse)
print "\nMAE Training/Test difference: %f" % (np.abs(tr_mae - ts_mae))
print "RMSE Training/Test difference: %f" % (np.abs(tr_rmse - ts_rmse))
test_mae_list.append(ts_mae)
test_rmse_list.append(ts_rmse)
if ts_rmse < min_rmse:
min_rmse = ts_rmse
target_k = t
print "=" * 71
print "Min Test RMSE: %s" % str(min_rmse)
print "Target K: %d" % target_k
return test_mae_list, test_rmse_list, parameters
def draw():
test_mae_list, test_rmse_list, p = compute_evaluation()
# plot MAE with fixed clusters
plt.figure()
plt.plot(p,
test_mae_list,
'r',
p,
test_mae_list,
'rs', lw=2)
plt.ylabel("MAE")
plt.xlabel("number of neighbors")
plt.title("Collaborative Filtering")
plt.grid(True)
# plot RMSE with fixed clusters
plt.figure()
plt.plot(p,
test_rmse_list,
'k',
p,
test_rmse_list,
'ks', lw=2)
plt.ylabel("RMSE")
plt.xlabel("number of neighbors")
plt.title("Collaborative Filtering")
plt.grid(True)
plt.show()
if __name__ == '__main__':
draw() | gpl-3.0 |
danforthcenter/plantcv | plantcv/plantcv/morphology/segment_angle.py | 2 | 3251 | # Find angles in degrees of skeleton segments
import os
import cv2
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import color_palette
def segment_angle(segmented_img, objects, label="default"):
""" Calculate angle of segments (in degrees) by fitting a linear regression line to segments.
Inputs:
segmented_img = Segmented image to plot slope lines and angles on
objects = List of contours
label = optional label parameter, modifies the variable name of observations recorded
Returns:
labeled_img = Segmented debugging image with angles labeled
:param segmented_img: numpy.ndarray
:param objects: list
:param label: str
:return labeled_img: numpy.ndarray
"""
label_coord_x = []
label_coord_y = []
segment_angles = []
labeled_img = segmented_img.copy()
# Use a previously saved color scale if available
rand_color = color_palette(num=len(objects), saved=True)
for i, cnt in enumerate(objects):
# Find bounds for regression lines to get drawn
rect = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rect)
df = pd.DataFrame(pts, columns=('x', 'y'))
x_max = int(df['x'].max())
x_min = int(df['x'].min())
# Find line fit to each segment
[vx, vy, x, y] = cv2.fitLine(objects[i], cv2.DIST_L2, 0, 0.01, 0.01)
slope = -vy / vx
left_list = int(((x - x_min) * slope) + y)
right_list = int(((x - x_max) * slope) + y)
if slope > 1000000 or slope < -1000000:
print("Slope of contour with ID#", i, "is", slope, "and cannot be plotted.")
else:
# Draw slope lines
cv2.line(labeled_img, (x_max - 1, right_list), (x_min, left_list), rand_color[i], 1)
# Store coordinates for labels
label_coord_x.append(objects[i][0][0][0])
label_coord_y.append(objects[i][0][0][1])
# Calculate degrees from slopes
segment_angles.append(np.arctan(slope[0]) * 180 / np.pi)
segment_ids = []
for i, cnt in enumerate(objects):
# Label slope lines
w = label_coord_x[i]
h = label_coord_y[i]
text = "{:.2f}".format(segment_angles[i])
cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
# segment_label = "ID" + str(i)
segment_ids.append(i)
outputs.add_observation(sample=label, variable='segment_angle', trait='segment angle',
method='plantcv.plantcv.morphology.segment_angle', scale='degrees', datatype=list,
value=segment_angles, label=segment_ids)
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented_angles.png'))
elif params.debug == 'plot':
plot_image(labeled_img)
return labeled_img
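# --- Hedged sketch (not part of the PlantCV package) -------------------------
# Minimal check of the slope-to-angle step used above, on a synthetic straight
# "segment".  Image y grows downward, hence slope = -vy / vx, and the reported
# angle is arctan(slope) in degrees.  The synthetic contour is invented purely
# for illustration.
def _example_segment_angle():
    pts = np.array([[[i, 100 - i]] for i in range(50)], dtype=np.int32)  # contour-shaped points
    vx, vy, x0, y0 = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
    slope = -vy / vx
    return float(np.arctan(slope) * 180 / np.pi)  # ~45 degrees for this segment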
| mit |
saskartt/kandi | plotProfiles.py | 1 | 7136 | #!/usr/bin/env python
import sys
import argparse
import numpy as np
from scipy.optimize import leastsq
import matplotlib as mpl
mpl.use('GTKCairo')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib import rc
from itertools import cycle
from settings import *
from kandiLib import *
'''
Plot time-averaged profiles.
'''
#==========================================================#
parser = argparse.ArgumentParser(
prog='plotProfiles.py', description='''Plot time-averaged profiles.''')
parser.add_argument("-f", "--files", type=str, nargs='+',default=None,
help="Name of the input netCDF4 files.")
parser.add_argument("-d", "--domains", type=str, nargs='+',
default=['0'], help="Statistical domains to process. Default: 00")
parser.add_argument("-v", "--variable", type=str,
default="u", help="Variable to be plotted.")
parser.add_argument("-s", "--save", type=str, help="Save resulting figure as.")
parser.add_argument("-ft", "--fit", type=int, nargs=2, default=[30,60], help="Range of vertical grid points to fit to.")
parser.add_argument("-x", "--xlims", type=float, nargs=2, help="Set x axis limits manually.")
parser.add_argument("-y", "--ylims", type=float, nargs=2, help="Set y axis limits manually.")
args = parser.parse_args()
#==========================================================#
mpl.rcParams["mathtext.fontset"] ="cm"
# Read all datasets into a list
dsList = []; tpList = {}; nameList = {}
for fname in args.files:
ds = openDataSet(fname)
nameList[ds] = fname
dsList.append(ds)
t_inds, = np.where(np.logical_and(ds.variables['time'][:] >= timespan[0], ds.variables['time'][:] <= timespan[1]))
tpList[ds] = t_inds
plt.figure(1)
plt.grid()
axes = plt.gca()
if (args.xlims):
axes.set_xlim(args.xlims)
if (args.ylims):
axes.set_ylim(args.ylims)
else:
axes.set_ylim([0, 128])
plt.ylabel("$z\/\mathrm{(m)}$",fontsize=14)
color_cycle = ['b', 'g', 'r', 'c', 'm', 'y', 'fuchsia', 'gold', 'orange', 'lightcoral', 'lightslategrey','tan']
i=0
for ds in dsList:
for domain in args.domains:
if (args.variable == "u"):
datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[0]
plt.xlabel("$\mathbf{u}\/\mathrm{(m/s)}$",fontsize=14)
elif (args.variable == "v"):
datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[1]
plt.xlabel("$\mathbf{v}\/\mathrm{(m/s)}$",fontsize=14)
elif (args.variable == "w"):
datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[2]
plt.xlabel("$\mathbf{w}\/\mathrm{(m/s)}$",fontsize=14)
elif (args.variable == "U"):
datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[3]
plt.xlabel("$\mathbf{U}\/\mathrm{(m/s)}$",fontsize=14)
elif (args.variable == "u*2"):
datalist=averageProfilesVariances(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[0]
plt.xlabel("$\sigma^2_{u}\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "v*2"):
datalist=averageProfilesVariances(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[1]
plt.xlabel("$\sigma^2_{v}\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "w*2"):
datalist=averageProfilesVariances(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[2]
plt.xlabel("$\sigma^2_{w}\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "wu"):
datalist=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[0]
plt.xlabel("$\overline{u'w'}\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "wv"):
datalist=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[1]
plt.xlabel("$\overline{v'w'}\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "flux"):
datalist=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[2]
plt.xlabel(r"$\overline{u'w'}+ \overline{v'w'} \/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "tke"):
datalist=averageProfilesTKE(domain, tpList[ds], pr_heights_plot, ds)
pr = datalist[0]
plt.xlabel("$TKE\/\mathrm{(m^2/s^2)}$",fontsize=14)
elif (args.variable == "u*"):
datalist=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)
pr1 = np.power(datalist[0],2.0)
pr2 = np.power(datalist[1],2.0)
pr = np.power(pr1+pr2,0.25)
elif (args.variable == "z0"):
flux=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)[2]
fric_vel= np.sqrt(np.polyfit(pr_heights_plot[20:80]/128.-1,flux[20:80],1)[0])
hwind = datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
hwind = hwind[3]
disp_height = 30.-((60.-30.)/(np.exp(0.41*((hwind[60]/fric_vel)-(hwind[30]/fric_vel)))-1))
print("Displacement height: {}".format(disp_height))
uu = (hwind/fric_vel)
pr = (pr_heights_plot-disp_height)*np.exp(-uu*0.41)
pr_heights_plot = pr_heights_plot
elif (args.variable == "wplog"):
flux1=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)[0]
flux2=averageProfilesMomentumFluxes(domain, tpList[ds], pr_heights_plot, ds)[1]
flux = (flux1**2.0 + flux2**2.0)**0.25
fric_vel= np.mean(flux[args.fit[0]:args.fit[1]])
print(fric_vel)
datalist=averageProfilesWS(domain, tpList[ds], pr_heights_plot, ds)
hwind = datalist[3]
plt.xlabel("$\mathbf{u}/\mathbf{u_*}\/\mathrm{(m/s)}$",fontsize=14)
plt.plot(hwind/fric_vel,pr_heights_plot, label=r'Run: {}, simulated'.format(nameList[ds][4], domain),color=color_cycle[i])
z=pr_heights_plot[args.fit[0]:args.fit[1]]
u_profile = hwind[args.fit[0]:args.fit[1]]
funcLogProfile = lambda val,z : (fric_vel/0.41)*np.log((z-val[1])/val[0])
ErrorFunc = lambda val,z,pr: np.abs(funcLogProfile(val,z)-u_profile)
valInitial=(1.0,0.0)
valFinal,success = leastsq(ErrorFunc,valInitial[:],args=(z,u_profile))
print("Least squrares fit: {}".format(valFinal))
np.seterr(invalid='ignore')
pr=(1./0.41)*np.log((pr_heights_plot-valFinal[1])/valFinal[0])
# plt.plot(pr,pr_heights_plot, label=r'Run: {}, logprofile'.format(nameList[ds][4:], domain))
else:
raise NameError("Unknown variable "+args.variable)
if (args.ylims):
axes.set_ylim([args.ylims[0],args.ylims[1]])
if (args.xlims):
axes.set_xlim([args.xlims[0],args.xlims[1]])
plt.plot(pr,pr_heights_plot, label=r'Run: {}, log profile'.format(nameList[ds][4]), linestyle='--', color=color_cycle[i])
i=i+1
#axes.fill_between(np.linspace(0,12.0), 16, 32, facecolor='yellow', alpha=0.3,
# label='Roof level < h < 0.3*BLH')
leg = plt.legend(loc=0, fontsize=9)
for legobj in leg.legendHandles:
legobj.set_linewidth(2.0)
if (args.save):
plt.savefig(args.save)
print("Figure {} saved.".format(args.save))
plt.show()
| mit |
fzalkow/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
AsaWilks/diabetes_test | diabetes.py | 1 | 1326 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
me = "asa"
twome = me*2
print(twome)
x = (1,2,3,4,5)
y = (3,4,5,6,7)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show() | mit |
nvoron23/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 56 | 37976 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                        err_msg="pred_%d doesn't match last pred_%d for loss %r and subsample %r. "
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
josesho/bootstrapContrast | bootstrap_contrast/plot_tools.py | 2 | 4118 | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from .misc_tools import merge_two_dicts
def halfviolin(v, half = 'right', color = 'k'):
for b in v['bodies']:
mVertical = np.mean(b.get_paths()[0].vertices[:, 0])
mHorizontal = np.mean(b.get_paths()[0].vertices[:, 1])
        if half == 'left':
            b.get_paths()[0].vertices[:, 0] = np.clip(b.get_paths()[0].vertices[:, 0], -np.inf, mVertical)
        if half == 'right':
            b.get_paths()[0].vertices[:, 0] = np.clip(b.get_paths()[0].vertices[:, 0], mVertical, np.inf)
        if half == 'bottom':
            b.get_paths()[0].vertices[:, 1] = np.clip(b.get_paths()[0].vertices[:, 1], -np.inf, mHorizontal)
        if half == 'top':
            b.get_paths()[0].vertices[:, 1] = np.clip(b.get_paths()[0].vertices[:, 1], mHorizontal, np.inf)
b.set_color(color)
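# Illustrative sketch (not part of the original module): a minimal,
# hypothetical use of ``halfviolin`` on the dictionary returned by
# matplotlib's ``ax.violinplot``, clipping each body to its right half.
# The function name and data below are invented for illustration only.
def _demo_halfviolin():
    rng = np.random.RandomState(0)
    fig, ax = plt.subplots()
    v = ax.violinplot([rng.normal(size=100), rng.normal(1, 0.5, 100)],
                      showextrema=False)
    halfviolin(v, half='right', color='steelblue')
    return fig, ax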
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# Taken from
# http://stackoverflow.com/questions/7630778/matplotlib-align-origin-of-right-axis-with-specific-left-axis-value
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
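# Illustrative sketch (not part of the original module): a hypothetical
# use of ``align_yaxis`` to line up the value 0 on a primary axis with
# the value 0 on its twin axis. Data below are invented for illustration.
def _demo_align_yaxis():
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot([0, 1, 2], [3, -1, 2], color='k')
    ax2.plot([0, 1, 2], [30, 10, -5], color='r')
    align_yaxis(ax1, 0, ax2, 0)
    return fig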
def rotate_ticks(axes, angle=45, alignment='right'):
for tick in axes.get_xticklabels():
tick.set_rotation(angle)
tick.set_horizontalalignment(alignment)
def plot_means(data,x,y,ax=None,xwidth=0.5,zorder=1,linestyle_kw=None):
"""Takes a pandas DataFrame and plots the `y` means of each group in `x` as horizontal lines.
Keyword arguments:
data: pandas DataFrame.
        This DataFrame should be in 'long' (tidy) format, with `x` and `y` as columns.
x,y: string.
x and y columns to be plotted.
xwidth: float, default 0.5
The horizontal spread of the line. The default is 0.5, which means
the mean line will stretch 0.5 (in data coordinates) on both sides
of the xtick.
zorder: int, default 1
This is the plot order of the means on the axes.
See http://matplotlib.org/examples/pylab_examples/zorder_demo.html
linestyle_kw: dict, default None
Dictionary with kwargs passed to the `meanprops` argument of `plt.boxplot`.
"""
# Set default linestyle parameters.
default_linestyle_kw=dict(
linewidth=1.5,
color='k',
linestyle='-')
# If user has specified kwargs for linestyle, merge with default params.
if linestyle_kw is None:
meanlinestyle_kw=default_linestyle_kw
else:
meanlinestyle_kw=merge_two_dicts(default_linestyle_kw,linestyle_kw)
# Set axes for plotting.
if ax is None:
ax=plt.gca()
# Use sns.boxplot to create the mean lines.
sns.boxplot(data=data,
x=x,y=y,
ax=ax,
showmeans=True,
meanline=True,
showbox=False,
showcaps=False,
showfliers=False,
whis=0,
width=xwidth,
zorder=int(zorder),
meanprops=meanlinestyle_kw,
medianprops=dict(linewidth=0)
)
def plot_std(data, x, y, offset=0, ax=None, **kwargs):
'''Convenience function to plot the standard devations as vertical
errorbars.'''
if ax is None:
ax = plt.gca()
keys = kwargs.keys()
if 'zorder' not in keys:
kwargs['zorder'] = 5
if 'lw' not in keys:
        kwargs['lw'] = 2.25
if 'color' not in keys:
kwargs['color'] = 'k'
if 'alpha' not in keys:
kwargs['alpha'] = 0.5
num_groups = len(data[x].unique())
ax.errorbar(x=np.array(range(0, num_groups)) + offset,
y=data.groupby(x)[y].mean().tolist(),
yerr=data.groupby(x)[y].std().tolist(),
fmt='none',
**kwargs)
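# Illustrative sketch (not part of the original module): hypothetical
# usage of ``plot_means`` and ``plot_std`` on a long-format DataFrame
# with a 'group' column and a 'value' column. Column names and data are
# invented for illustration only.
def _demo_mean_std_overlay():
    import pandas as pd
    rng = np.random.RandomState(0)
    df = pd.DataFrame({'group': np.repeat(['a', 'b'], 50),
                       'value': np.concatenate([rng.normal(0, 1, 50),
                                                rng.normal(1, 1, 50)])})
    fig, ax = plt.subplots()
    plot_means(df, x='group', y='value', ax=ax)
    plot_std(df, x='group', y='value', ax=ax)
    return fig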
| gpl-3.0 |
Sentient07/scikit-learn | sklearn/ensemble/forest.py | 8 | 67993 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
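# Illustrative sketch (not part of the original module): a minimal,
# hypothetical check of the bootstrap / out-of-bag relationship between
# the two helpers above -- every index is either drawn into the
# bootstrap sample or reported as unsampled, never both. The function
# name is invented for illustration only.
def _demo_oob_indices(random_state=0, n_samples=10):
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    assert set(sampled).isdisjoint(unsampled)
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    return sampled, unsampled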
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
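# Illustrative sketch (not part of the original module): how the
# bootstrap draw in ``_parallel_build_trees`` is expressed as per-sample
# weights -- a row drawn k times gets weight k and an undrawn row gets
# weight 0 -- instead of materialising the resampled matrix X[indices].
# The function name is invented for illustration only.
def _demo_bootstrap_weights(random_state=0, n_samples=5):
    indices = _generate_sample_indices(random_state, n_samples)
    counts = bincount(indices, minlength=n_samples)
    weights = np.ones((n_samples,), dtype=np.float64) * counts
    return indices, weights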
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
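# Illustrative sketch (not part of the original module): how ``predict``
# above maps averaged class probabilities back to labels, i.e.
# ``classes_.take`` applied to the argmax over columns. The arrays below
# are invented for illustration only.
def _demo_proba_to_labels():
    classes = np.array(['cat', 'dog', 'fish'])
    proba = np.array([[0.2, 0.7, 0.1],
                      [0.6, 0.3, 0.1]])
    # yields ['dog', 'cat']
    return classes.take(np.argmax(proba, axis=1), axis=0)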
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
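# Illustrative sketch (not part of the original module): a minimal,
# hypothetical use of ``RandomForestClassifier`` on a toy problem,
# touching the attributes documented above. Data and names are invented
# for illustration only.
def _demo_random_forest_classifier():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    # averaged per-feature importances and the out-of-bag accuracy
    return clf.feature_importances_, clf.oob_score_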
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
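# Illustrative sketch (not part of the original module): a minimal,
# hypothetical use of ``RandomForestRegressor`` with ``oob_score=True``
# to obtain an out-of-bag R^2 estimate without a held-out set. Data and
# names are invented for illustration only.
def _demo_random_forest_regressor():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    y = 2.0 * X[:, 0] + 0.1 * rng.randn(200)
    reg = RandomForestRegressor(n_estimators=50, oob_score=True,
                                random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.oob_prediction_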
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
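    Examples
    --------
    A minimal usage sketch (illustrative only; the import path assumes the
    public ``sklearn.ensemble`` API):
    >>> import numpy as np
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = np.random.RandomState(0).rand(100, 2)
    >>> embedder = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_sparse = embedder.fit_transform(X)
    >>> X_sparse.shape[0]
    100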
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
            # Pre-sort indices to avoid having each individual tree of the
            # ensemble sort the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter4/fig_GMM_1D.py | 3 | 4324 | """
1D Gaussian Mixture Example
---------------------------
Figure 4.2.
Example of a one-dimensional Gaussian mixture model with three components.
The left panel shows a histogram of the data, along with the best-fit model
for a mixture with three components. The center panel shows the model selection
criteria AIC (see Section 4.3) and BIC (see Section 5.4) as a function of the
number of components. Both are minimized for a three-component model. The
right panel shows the probability that a given point is drawn from each class
as a function of its position. For a given x value, the vertical extent of
each region is proportional to that probability. Note that extreme values
are most likely to belong to class 1.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
import numpy as np
from sklearn.mixture import GMM
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the dataset.
# We'll use scikit-learn's Gaussian Mixture Model to sample
# data from a mixture of Gaussians. The usual way of using
# this involves fitting the mixture to data: we'll see that
# below. Here we'll set the internal means, covariances,
# and weights by-hand.
np.random.seed(1)
gmm = GMM(3, n_iter=1)
gmm.means_ = np.array([[-1], [0], [3]])
gmm.covars_ = np.array([[1.5], [1], [0.5]]) ** 2
gmm.weights_ = np.array([0.3, 0.5, 0.2])
X = gmm.sample(1000)
#------------------------------------------------------------
# Learn the best-fit GMM models
# Here we'll use GMM in the standard way: the fit() method
# uses an Expectation-Maximization approach to find the best
# mixture of Gaussians for the data
# fit models with 1-10 components
N = np.arange(1, 11)
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GMM(N[i]).fit(X)
# compute the AIC and the BIC
AIC = [m.aic(X) for m in models]
BIC = [m.bic(X) for m in models]
#------------------------------------------------------------
# Plot the results
# We'll use three panels:
# 1) data + best-fit mixture
# 2) AIC and BIC vs number of components
# 3) probability that a point came from each component
fig = plt.figure(figsize=(5, 1.7))
fig.subplots_adjust(left=0.12, right=0.97,
bottom=0.21, top=0.9, wspace=0.5)
# plot 1: data + best-fit mixture
ax = fig.add_subplot(131)
M_best = models[np.argmin(AIC)]
x = np.linspace(-6, 6, 1000)
logprob, responsibilities = M_best.eval(x)
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
ax.hist(X, 30, normed=True, histtype='stepfilled', alpha=0.4)
ax.plot(x, pdf, '-k')
ax.plot(x, pdf_individual, '--k')
ax.text(0.04, 0.96, "Best-fit Mixture",
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
# plot 2: AIC and BIC
ax = fig.add_subplot(132)
ax.plot(N, AIC, '-k', label='AIC')
ax.plot(N, BIC, '--k', label='BIC')
ax.set_xlabel('n. components')
ax.set_ylabel('information criterion')
ax.legend(loc=2)
# plot 3: posterior probabilities for each component
ax = fig.add_subplot(133)
p = M_best.predict_proba(x)
p = p[:, (1, 0, 2)] # rearrange order so the plot looks better
p = p.cumsum(1).T
ax.fill_between(x, 0, p[0], color='gray', alpha=0.3)
ax.fill_between(x, p[0], p[1], color='gray', alpha=0.5)
ax.fill_between(x, p[1], 1, color='gray', alpha=0.7)
ax.set_xlim(-6, 6)
ax.set_ylim(0, 1)
ax.set_xlabel('$x$')
ax.set_ylabel(r'$p({\rm class}|x)$')
ax.text(-5, 0.3, 'class 1', rotation='vertical')
ax.text(0, 0.5, 'class 2', rotation='vertical')
ax.text(3, 0.3, 'class 3', rotation='vertical')
plt.show()
| bsd-2-clause |
adlyons/AWOT | awot/io/common.py | 1 | 4765 | """
awot.io.common
==============
Common IO routines.
"""
from __future__ import print_function
import numpy as np
from netCDF4 import num2date, date2num
import matplotlib.pyplot as plt  # used by save_figure below
#################################
# variable/dictionary methods #
#################################
EPOCH_UNITS = 'seconds since 1970-1-1 00:00:00+0:00'
def _build_dict(data, units, longname, stdname):
newdict = {'data': data,
'units': units,
'long_name': longname,
'standard_name': stdname}
return newdict
def _ncvar_subset_masked(ncFile, ncvar, Good_Indices):
"""
Convert a NetCDF variable into a masked variable.
Assumes a 1D variable
"""
d = ncFile.variables[ncvar][Good_Indices]
    d = np.ma.masked_invalid(d)
    return d
def _ncvar_subset_to_dict(ncvar, Good_Indices):
"""
Convert a NetCDF Dataset variable to a dictionary.
Appropriated from Py-ART package.
Assumes subsetting in first column.
"""
d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs())
d['data'] = ncvar[:]
if np.isscalar(d['data']):
# netCDF4 1.1.0+ returns a scalar for 0-dim array, we always want
# 1-dim+ arrays with a valid shape.
d['data'] = np.array(d['data'][Good_Indices, :])
d['data'].shape = (1, )
return d
def _ncvar_to_dict(ncvar):
"""
Convert a NetCDF Dataset variable to a dictionary.
Appropriated from PyArt package.
"""
d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs())
d['data'] = ncvar[:]
if np.isscalar(d['data']):
# netCDF4 1.1.0+ returns a scalar for 0-dim array, we always want
# 1-dim+ arrays with a valid shape.
d['data'] = np.array(d['data'][:])
d['data'].shape = (1, )
return d
def _ncvar_to_dict_masked(ncvar, Good_Indices):
"""
Convert a NetCDF Dataset variable to a dictionary.
Appropriated from PyArt package.
"""
d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs())
d['data'] = ncvar[Good_Indices]
if np.isscalar(d['data']):
# netCDF4 1.1.0+ returns a scalar for 0-dim array, we always want
# 1-dim+ arrays with a valid shape.
d['data'] = np.array(d['data'][:])
d['data'].shape = (1, )
return d
def _nasa_ames_var_to_dict(var, standard_name, long_name):
d = {}
d['standard_name'] = standard_name
d['long_name'] = long_name
d['units'] = " "
d['data'] = var
return d
def _h5var_to_dict(dataset, units=None, long_name=None, standard_name=None):
""" Convert an HDF5 Dataset to a dictionary."""
d = {}
if dataset.dtype.char == "S":
d['data'] = np.array(dataset)[0]
else:
d['data'] = np.array(dataset)
if len(dataset.attrs) > 0:
for attrname in list(dataset.attrs):
d[attrname] = dataset.attrs.get(attrname)
else:
d['standard_name'] = standard_name
d['long_name'] = long_name
d['units'] = units
return d
def _var_found(var):
'''Print variable found message.'''
print("Found %s" % var)
def _var_not_found(var):
'''Print variable not found message.'''
print("%s does not exist in file..." % var)
##################
# time methods #
##################
def _get_epoch_dict(TimeSec, time_units):
'''Output Epoch time dictionary.'''
# Convert the time array into a datetime instance
dtHrs = num2date(TimeSec, time_units)
# Now convert this datetime instance into a number of seconds since Epoch
TimeEpoch = date2num(dtHrs, EPOCH_UNITS)
# Now once again convert this data into a datetime instance
Time_unaware = num2date(TimeEpoch, EPOCH_UNITS)
Time = {'data': Time_unaware, 'units': EPOCH_UNITS,
'standard_name': 'Time', 'long_name': 'Time (UTC)'}
return Time
def convert_to_epoch_dict(datetime_dict):
'''Output Epoch time dictionary.'''
# Now convert this datetime instance into a number array
TimeSec = date2num(datetime_dict['data'], EPOCH_UNITS)
# Now once again convert data into a datetime instance
Time_unaware = num2date(TimeSec, EPOCH_UNITS)
Time = {'data': Time_unaware, 'units': EPOCH_UNITS,
'standard_name': 'Time', 'long_name': 'Time (UTC)'}
return Time
########################
# image save methods #
########################
def save_figure(self, figName='awot_plot', figType='png', **kwargs):
'''Save the current plot
Parameters
----------
figName : str
Figure name
figType : str
Figure format, default to .png
'''
plt.gca()
plt.gcf()
plt.savefig(figName+'.'+figType, format=figType)
print("Saved figure: %s.%s" % (figName, figType))
# Now close the plot to make sure matplotlib is happy
plt.close()
| gpl-2.0 |
johnnyliu27/openmc | openmc/filter_expansion.py | 1 | 16333 | from numbers import Integral, Real
from xml.etree import ElementTree as ET
import numpy as np
import pandas as pd
import openmc.checkvalue as cv
from . import Filter
class ExpansionFilter(Filter):
"""Abstract filter class for functional expansions."""
def __init__(self, order, filter_id=None):
self.order = order
self.id = filter_id
def __eq__(self, other):
if type(self) is not type(other):
return False
else:
return hash(self) == hash(other)
@property
def order(self):
return self._order
@order.setter
def order(self, order):
cv.check_type('expansion order', order, Integral)
cv.check_greater_than('expansion order', order, 0, equality=True)
self._order = order
def to_xml_element(self):
"""Return XML Element representing the filter.
Returns
-------
element : xml.etree.ElementTree.Element
            XML element containing expansion filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'order')
subelement.text = str(self.order)
return element
class LegendreFilter(ExpansionFilter):
r"""Score Legendre expansion moments up to specified order.
This filter allows scores to be multiplied by Legendre polynomials of the
change in particle angle (:math:`\mu`) up to a user-specified order.
Parameters
----------
order : int
Maximum Legendre polynomial order
filter_id : int or None
Unique identifier for the filter
Attributes
----------
order : int
Maximum Legendre polynomial order
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
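    Examples
    --------
    A minimal sketch of attaching this filter to a tally (assumes the class
    is exported at the top-level ``openmc`` namespace; the score used is
    illustrative):
    >>> import openmc
    >>> flt = openmc.LegendreFilter(order=2)   # moments P0, P1 and P2
    >>> tally = openmc.Tally()
    >>> tally.filters = [flt]
    >>> tally.scores = ['scatter']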
"""
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@ExpansionFilter.order.setter
def order(self, order):
ExpansionFilter.order.__set__(self, order)
self.bins = ['P{}'.format(i) for i in range(order + 1)]
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'].value.decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
+ group['type'].value.decode() + " instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(group['order'].value, filter_id)
return out
class SpatialLegendreFilter(ExpansionFilter):
r"""Score Legendre expansion moments in space up to specified order.
This filter allows scores to be multiplied by Legendre polynomials of the
the particle's position along a particular axis, normalized to a given
range, up to a user-specified order.
Parameters
----------
order : int
Maximum Legendre polynomial order
axis : {'x', 'y', 'z'}
Axis along which to take the expansion
minimum : float
Minimum value along selected axis
maximum : float
Maximum value along selected axis
filter_id : int or None
Unique identifier for the filter
Attributes
----------
order : int
Maximum Legendre polynomial order
axis : {'x', 'y', 'z'}
Axis along which to take the expansion
minimum : float
Minimum value along selected axis
maximum : float
Maximum value along selected axis
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
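    Examples
    --------
    A minimal sketch (assumes the class is exported at the top-level
    ``openmc`` namespace; the score and axis bounds are illustrative):
    >>> import openmc
    >>> flt = openmc.SpatialLegendreFilter(order=4, axis='z',
    ...                                    minimum=-10.0, maximum=10.0)
    >>> tally = openmc.Tally()
    >>> tally.filters = [flt]
    >>> tally.scores = ['flux']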
"""
def __init__(self, order, axis, minimum, maximum, filter_id=None):
super().__init__(order, filter_id)
self.axis = axis
self.minimum = minimum
self.maximum = maximum
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tAxis', self.axis)
string += '{: <16}=\t{}\n'.format('\tMin', self.minimum)
string += '{: <16}=\t{}\n'.format('\tMax', self.maximum)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tAxis', self.axis)
string += '{: <16}=\t{}\n'.format('\tMin', self.minimum)
string += '{: <16}=\t{}\n'.format('\tMax', self.maximum)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@ExpansionFilter.order.setter
def order(self, order):
ExpansionFilter.order.__set__(self, order)
self.bins = ['P{}'.format(i) for i in range(order + 1)]
@property
def axis(self):
return self._axis
@axis.setter
def axis(self, axis):
cv.check_value('axis', axis, ('x', 'y', 'z'))
self._axis = axis
@property
def minimum(self):
return self._minimum
@minimum.setter
def minimum(self, minimum):
cv.check_type('minimum', minimum, Real)
self._minimum = minimum
@property
def maximum(self):
return self._maximum
@maximum.setter
def maximum(self, maximum):
cv.check_type('maximum', maximum, Real)
self._maximum = maximum
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'].value.decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
+ group['type'].value.decode() + " instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
order = group['order'].value
axis = group['axis'].value.decode()
min_, max_ = group['min'].value, group['max'].value
return cls(order, axis, min_, max_, filter_id)
def to_xml_element(self):
"""Return XML Element representing the filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing Legendre filter data
"""
element = super().to_xml_element()
subelement = ET.SubElement(element, 'axis')
subelement.text = self.axis
subelement = ET.SubElement(element, 'min')
subelement.text = str(self.minimum)
subelement = ET.SubElement(element, 'max')
subelement.text = str(self.maximum)
return element
class SphericalHarmonicsFilter(ExpansionFilter):
r"""Score spherical harmonic expansion moments up to specified order.
This filter allows you to obtain real spherical harmonic moments of either
the particle's direction or the cosine of the scattering angle. Specifying
a filter with order :math:`\ell` tallies moments for all orders from 0 to
:math:`\ell`.
Parameters
----------
order : int
Maximum spherical harmonics order, :math:`\ell`
filter_id : int or None
Unique identifier for the filter
Attributes
----------
order : int
Maximum spherical harmonics order, :math:`\ell`
id : int
Unique identifier for the filter
cosine : {'scatter', 'particle'}
How to handle the cosine term.
num_bins : int
The number of filter bins
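    Examples
    --------
    A minimal sketch (assumes the class is exported at the top-level
    ``openmc`` namespace; the score is illustrative):
    >>> import openmc
    >>> flt = openmc.SphericalHarmonicsFilter(order=3)
    >>> flt.cosine = 'scatter'
    >>> tally = openmc.Tally()
    >>> tally.filters = [flt]
    >>> tally.scores = ['scatter']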
"""
def __init__(self, order, filter_id=None):
super().__init__(order, filter_id)
self._cosine = 'particle'
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tCosine', self.cosine)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tCosine', self.cosine)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@ExpansionFilter.order.setter
def order(self, order):
ExpansionFilter.order.__set__(self, order)
self.bins = ['Y{},{}'.format(n, m)
for n in range(order + 1)
for m in range(-n, n + 1)]
@property
def cosine(self):
return self._cosine
@cosine.setter
def cosine(self, cosine):
cv.check_value('Spherical harmonics cosine treatment', cosine,
('scatter', 'particle'))
self._cosine = cosine
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'].value.decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
+ group['type'].value.decode() + " instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(group['order'].value, filter_id)
out.cosine = group['cosine'].value.decode()
return out
def to_xml_element(self):
"""Return XML Element representing the filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing spherical harmonics filter data
"""
element = super().to_xml_element()
element.set('cosine', self.cosine)
return element
class ZernikeFilter(ExpansionFilter):
r"""Score Zernike expansion moments in space up to specified order.
This filter allows scores to be multiplied by Zernike polynomials of the
particle's position normalized to a given unit circle, up to a
user-specified order. The standard Zernike polynomials follow the
definition by Born and Wolf, *Principles of Optics* and are defined as
.. math::
Z_n^m(\rho, \theta) = R_n^m(\rho) \cos (m\theta), \quad m > 0
        Z_n^{m}(\rho, \theta) = R_n^{|m|}(\rho) \sin (|m|\theta), \quad m < 0
Z_n^{m}(\rho, \theta) = R_n^{m}(\rho), \quad m = 0
where the radial polynomials are
.. math::
R_n^m(\rho) = \sum\limits_{k=0}^{(n-m)/2} \frac{(-1)^k (n-k)!}{k! (
\frac{n+m}{2} - k)! (\frac{n-m}{2} - k)!} \rho^{n-2k}.
With this definition, the integral of :math:`(Z_n^m)^2` over the unit disk
is :math:`\frac{\epsilon_m\pi}{2n+2}` for each polynomial where
:math:`\epsilon_m` is 2 if :math:`m` equals 0 and 1 otherwise.
Specifying a filter with order N tallies moments for all :math:`n` from 0
to N and each value of :math:`m`. The ordering of the Zernike polynomial
moments follows the ANSI Z80.28 standard, where the one-dimensional index
:math:`j` corresponds to the :math:`n` and :math:`m` by
.. math::
j = \frac{n(n + 2) + m}{2}.
Parameters
----------
order : int
Maximum Zernike polynomial order
x : float
x-coordinate of center of circle for normalization
y : float
y-coordinate of center of circle for normalization
    r : float
Radius of circle for normalization
Attributes
----------
order : int
Maximum Zernike polynomial order
x : float
x-coordinate of center of circle for normalization
y : float
y-coordinate of center of circle for normalization
    r : float
Radius of circle for normalization
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
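    Examples
    --------
    For order :math:`N` there are :math:`(N + 1)(N + 2) / 2` moments, e.g. 15
    for :math:`N = 4`. A minimal sketch (assumes the class is exported at the
    top-level ``openmc`` namespace; the score and unit-circle parameters are
    illustrative):
    >>> import openmc
    >>> flt = openmc.ZernikeFilter(order=4, x=0.0, y=0.0, r=10.0)
    >>> tally = openmc.Tally()
    >>> tally.filters = [flt]
    >>> tally.scores = ['fission']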
"""
def __init__(self, order, x=0.0, y=0.0, r=1.0, filter_id=None):
super().__init__(order, filter_id)
self.x = x
self.y = y
self.r = r
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tX', self.x)
string += '{: <16}=\t{}\n'.format('\tY', self.y)
string += '{: <16}=\t{}\n'.format('\tR', self.r)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tOrder', self.order)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@ExpansionFilter.order.setter
def order(self, order):
ExpansionFilter.order.__set__(self, order)
self.bins = ['Z{},{}'.format(n, m)
for n in range(order + 1)
for m in range(-n, n + 1, 2)]
@property
def x(self):
return self._x
@x.setter
def x(self, x):
cv.check_type('x', x, Real)
self._x = x
@property
def y(self):
return self._y
@y.setter
def y(self, y):
cv.check_type('y', y, Real)
self._y = y
@property
def r(self):
return self._r
@r.setter
def r(self, r):
cv.check_type('r', r, Real)
self._r = r
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'].value.decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
+ group['type'].value.decode() + " instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
order = group['order'].value
x, y, r = group['x'].value, group['y'].value, group['r'].value
return cls(order, x, y, r, filter_id)
def to_xml_element(self):
"""Return XML Element representing the filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing Zernike filter data
"""
element = super().to_xml_element()
subelement = ET.SubElement(element, 'x')
subelement.text = str(self.x)
subelement = ET.SubElement(element, 'y')
subelement.text = str(self.y)
subelement = ET.SubElement(element, 'r')
subelement.text = str(self.r)
return element
class ZernikeRadialFilter(ZernikeFilter):
r"""Score the :math:`m = 0` (radial variation only) Zernike moments up to
specified order.
The Zernike polynomials are defined the same as in :class:`ZernikeFilter`.
.. math::
Z_n^{0}(\rho, \theta) = R_n^{0}(\rho)
where the radial polynomials are
.. math::
R_n^{0}(\rho) = \sum\limits_{k=0}^{n/2} \frac{(-1)^k (n-k)!}{k! ((
\frac{n}{2} - k)!)^{2}} \rho^{n-2k}.
With this definition, the integral of :math:`(Z_n^0)^2` over the unit disk
is :math:`\frac{\pi}{n+1}`.
If there is only radial dependency, the polynomials are integrated over
the azimuthal angles. The only terms left are :math:`Z_n^{0}(\rho, \theta)
    = R_n^{0}(\rho)`. Note that :math:`n` can only take even values.
    Therefore, for radial Zernike polynomials up to order :math:`n`,
there are :math:`\frac{n}{2} + 1` terms in total. The indexing is from the
lowest even order (0) to highest even order.
Parameters
----------
order : int
Maximum radial Zernike polynomial order
x : float
x-coordinate of center of circle for normalization
y : float
y-coordinate of center of circle for normalization
    r : float
Radius of circle for normalization
Attributes
----------
order : int
Maximum radial Zernike polynomial order
x : float
x-coordinate of center of circle for normalization
y : float
y-coordinate of center of circle for normalization
    r : float
Radius of circle for normalization
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
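    Examples
    --------
    A minimal sketch (assumes the class is exported at the top-level
    ``openmc`` namespace; the score is illustrative). For ``order=4`` the
    moments kept are :math:`Z_0^0`, :math:`Z_2^0` and :math:`Z_4^0`:
    >>> import openmc
    >>> flt = openmc.ZernikeRadialFilter(order=4, x=0.0, y=0.0, r=10.0)
    >>> tally = openmc.Tally()
    >>> tally.filters = [flt]
    >>> tally.scores = ['flux']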
"""
@ExpansionFilter.order.setter
def order(self, order):
ExpansionFilter.order.__set__(self, order)
self.bins = ['Z{},0'.format(n) for n in range(0, order+1, 2)]
| mit |
ibis-project/ibis-bigquery | tests/unit/test_compiler.py | 1 | 10157 | import datetime
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import packaging.version
import pandas as pd
import pytest
from ibis.expr.types import TableExpr
import ibis_bigquery
IBIS_VERSION = packaging.version.Version(ibis.__version__)
IBIS_1_4_VERSION = packaging.version.Version("1.4.0")
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(datetime.date(2017, 1, 1), "DATE '2017-01-01'", dt.date),
(pd.Timestamp('2017-01-01'), "DATE '2017-01-01'", dt.date,),
('2017-01-01', "DATE '2017-01-01'", dt.date),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
],
)
def test_literal_date(case, expected, dtype):
expr = ibis.literal(case, type=dtype).year()
result = ibis_bigquery.compile(expr)
assert result == f"SELECT EXTRACT(year from {expected}) AS `tmp`"
@pytest.mark.parametrize(
('case', 'expected', 'dtype', 'strftime_func'),
[
(
datetime.date(2017, 1, 1),
"DATE '2017-01-01'",
dt.date,
'FORMAT_DATE',
),
(
pd.Timestamp('2017-01-01'),
"DATE '2017-01-01'",
dt.date,
'FORMAT_DATE',
),
('2017-01-01', "DATE '2017-01-01'", dt.date, 'FORMAT_DATE',),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
'FORMAT_TIMESTAMP',
),
],
)
def test_day_of_week(case, expected, dtype, strftime_func):
date_var = ibis.literal(case, type=dtype)
expr_index = date_var.day_of_week.index()
result = ibis_bigquery.compile(expr_index)
assert (
result
== f"SELECT MOD(EXTRACT(DAYOFWEEK FROM {expected}) + 5, 7) AS `tmp`"
)
expr_name = date_var.day_of_week.full_name()
result = ibis_bigquery.compile(expr_name)
if strftime_func == 'FORMAT_TIMESTAMP':
assert (
result
== f"SELECT {strftime_func}('%A', {expected}, 'UTC') AS `tmp`"
)
else:
assert result == f"SELECT {strftime_func}('%A', {expected}) AS `tmp`"
@pytest.mark.parametrize(
("case", "expected", "dtype"),
[
("test of hash", "'test of hash'", dt.string,),
(b"test of hash", "FROM_BASE64('dGVzdCBvZiBoYXNo')", dt.binary,),
],
)
def test_hash(case, expected, dtype):
if IBIS_VERSION < IBIS_1_4_VERSION:
pytest.skip("requires ibis 1.4+")
string_var = ibis.literal(case, type=dtype)
expr = string_var.hash(how="farm_fingerprint")
result = ibis_bigquery.compile(expr)
assert result == f"SELECT farm_fingerprint({expected}) AS `tmp`"
@pytest.mark.parametrize(
("case", "expected", "how", "dtype"),
[
("test", "md5('test')", "md5", dt.string,),
(b"test", "md5(FROM_BASE64('dGVzdA=='))", "md5", dt.binary,),
("test", "sha1('test')", "sha1", dt.string,),
(b"test", "sha1(FROM_BASE64('dGVzdA=='))", "sha1", dt.binary,),
("test", "sha256('test')", "sha256", dt.string,),
(b"test", "sha256(FROM_BASE64('dGVzdA=='))", "sha256", dt.binary,),
("test", "sha512('test')", "sha512", dt.string,),
(b"test", "sha512(FROM_BASE64('dGVzdA=='))", "sha512", dt.binary,),
],
)
def test_hashbytes(case, expected, how, dtype):
if IBIS_VERSION < IBIS_1_4_VERSION:
pytest.skip("requires ibis 1.4+")
var = ibis.literal(case, type=dtype)
expr = var.hashbytes(how=how)
result = ibis_bigquery.compile(expr)
assert result == f"SELECT {expected} AS `tmp`"
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '2017-01-01 04:55:59'",
dt.timestamp,
),
(datetime.time(4, 55, 59), "TIME '04:55:59'", dt.time),
('04:55:59', "TIME '04:55:59'", dt.time),
],
)
def test_literal_timestamp_or_time(case, expected, dtype):
expr = ibis.literal(case, type=dtype).hour()
result = ibis_bigquery.compile(expr)
assert result == f"SELECT EXTRACT(hour from {expected}) AS `tmp`"
def test_projection_fusion_only_peeks_at_immediate_parent():
if IBIS_VERSION < IBIS_1_4_VERSION:
pytest.skip("requires ibis 1.4+")
schema = [
('file_date', 'timestamp'),
('PARTITIONTIME', 'date'),
('val', 'int64'),
]
table = ibis.table(schema, name='unbound_table')
table = table[table.PARTITIONTIME < ibis.date('2017-01-01')]
table = table.mutate(file_date=table.file_date.cast('date'))
table = table[table.file_date < ibis.date('2017-01-01')]
table = table.mutate(XYZ=table.val * 2)
expr = table.join(table.view())[table]
result = ibis_bigquery.compile(expr)
expected = """\
WITH t0 AS (
SELECT *
FROM unbound_table
WHERE `PARTITIONTIME` < DATE '2017-01-01'
),
t1 AS (
SELECT CAST(`file_date` AS DATE) AS `file_date`, `PARTITIONTIME`, `val`
FROM t0
),
t2 AS (
SELECT t1.*
FROM t1
WHERE t1.`file_date` < DATE '2017-01-01'
),
t3 AS (
SELECT *, `val` * 2 AS `XYZ`
FROM t2
)
SELECT t3.*
FROM t3
INNER JOIN t3 t4"""
assert result == expected
@pytest.mark.parametrize(
('unit', 'expected_unit', 'expected_func'),
[
('Y', 'YEAR', 'TIMESTAMP'),
('Q', 'QUARTER', 'TIMESTAMP'),
('M', 'MONTH', 'TIMESTAMP'),
('W', 'WEEK', 'TIMESTAMP'),
('D', 'DAY', 'TIMESTAMP'),
('h', 'HOUR', 'TIMESTAMP'),
('m', 'MINUTE', 'TIMESTAMP'),
('s', 'SECOND', 'TIMESTAMP'),
('ms', 'MILLISECOND', 'TIMESTAMP'),
('us', 'MICROSECOND', 'TIMESTAMP'),
('Y', 'YEAR', 'DATE'),
('Q', 'QUARTER', 'DATE'),
('M', 'MONTH', 'DATE'),
('W', 'WEEK', 'DATE'),
('D', 'DAY', 'DATE'),
('h', 'HOUR', 'TIME'),
('m', 'MINUTE', 'TIME'),
('s', 'SECOND', 'TIME'),
('ms', 'MILLISECOND', 'TIME'),
('us', 'MICROSECOND', 'TIME'),
],
)
def test_temporal_truncate(unit, expected_unit, expected_func):
t = ibis.table([('a', getattr(dt, expected_func.lower()))], name='t')
expr = t.a.truncate(unit)
result = ibis_bigquery.compile(expr)
expected = f"""\
SELECT {expected_func}_TRUNC(`a`, {expected_unit}) AS `tmp`
FROM t"""
assert result == expected
@pytest.mark.parametrize('kind', ['date', 'time'])
def test_extract_temporal_from_timestamp(kind):
t = ibis.table([('ts', dt.timestamp)], name='t')
expr = getattr(t.ts, kind)()
result = ibis_bigquery.compile(expr)
expected = f"""\
SELECT {kind.upper()}(`ts`) AS `tmp`
FROM t"""
assert result == expected
def test_now():
expr = ibis.now()
result = ibis_bigquery.compile(expr)
expected = 'SELECT CURRENT_TIMESTAMP() AS `tmp`'
assert result == expected
def test_binary():
t = ibis.table([('value', 'double')], name='t')
expr = t["value"].cast(dt.binary).name("value_hash")
result = ibis_bigquery.compile(expr)
expected = """\
SELECT CAST(`value` AS BYTES) AS `tmp`
FROM t"""
assert result == expected
def test_substring():
t = ibis.table([('value', 'string')], name='t')
expr = t["value"].substr(3, -1)
with pytest.raises(Exception) as exception_info:
ibis_bigquery.compile(expr)
expected = 'Length parameter should not be a negative value.'
assert str(exception_info.value) == expected
def test_bucket():
t = ibis.table([('value', 'double')], name='t')
buckets = [0, 1, 3]
expr = t.value.bucket(buckets).name('foo')
result = ibis_bigquery.compile(expr)
expected = """\
SELECT
CASE
WHEN (`value` >= 0) AND (`value` < 1) THEN 0
WHEN (`value` >= 1) AND (`value` <= 3) THEN 1
ELSE CAST(NULL AS INT64)
END AS `tmp`
FROM t"""
assert result == expected
@pytest.mark.parametrize(
('kind', 'begin', 'end', 'expected'),
[
('preceding', None, 1, 'UNBOUNDED PRECEDING AND 1 PRECEDING'),
('following', 1, None, '1 FOLLOWING AND UNBOUNDED FOLLOWING'),
],
)
def test_window_unbounded(kind, begin, end, expected):
t = ibis.table([('a', 'int64')], name='t')
kwargs = {kind: (begin, end)}
expr = t.a.sum().over(ibis.window(**kwargs))
result = ibis_bigquery.compile(expr)
assert (
result
== f"""\
SELECT sum(`a`) OVER (ROWS BETWEEN {expected}) AS `tmp`
FROM t"""
)
def test_large_compile():
"""
Tests that compiling a large expression tree finishes
within a reasonable amount of time
"""
num_columns = 20
num_joins = 7
class MockBigQueryClient(ibis_bigquery.BigQueryClient):
def __init__(self):
pass
names = [f"col_{i}" for i in range(num_columns)]
schema = ibis.Schema(names, ['string'] * num_columns)
ibis_client = MockBigQueryClient()
table = TableExpr(
ops.SQLQueryResult("select * from t", schema, ibis_client)
)
for _ in range(num_joins):
table = table.mutate(dummy=ibis.literal(""))
table = table.left_join(table, ["dummy"])[[table]]
start = datetime.datetime.now()
table.compile()
delta = datetime.datetime.now() - start
assert delta.total_seconds() < 10
| apache-2.0 |
GitYiheng/reinforcement_learning_test | test00_previous_files/mc_pi_dst_fv_es.py | 1 | 2517 | # Monte Carlo policy iteration
# Deterministic state transition
# First-visit
# Exploring starts
import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from dp_ipe_dst_dp import print_values, print_policy
SMALL_ENOUGH = 1e-4
GAMMA = 0.9
def play_one_episode(grid, policy):
valid_states = list(grid.actions.keys())
start_state_index = np.random.choice(len(valid_states))
s = grid.set_state(valid_states[start_state_index])
a = np.random.choice(list(policy[s]))
states_actions_rewards = [(s, a, 0)]
visited_states = set()
while True:
old_s = grid.current_state()
r = grid.move(a)
s = grid.current_state()
if s in visited_states:
states_actions_rewards.append((s, None, -100))
break
elif grid.game_over():
states_actions_rewards.append((s, None, r))
break
else:
a = policy[s]
states_actions_rewards.append((s, a, r))
visited_states.add(s)
G = 0
states_actions_returns = []
first = True
for s, a, r in reversed(states_actions_rewards):
if first:
first = False
else:
states_actions_returns.append((s, a, G))
G = r + GAMMA * G
states_actions_returns.reverse()
return states_actions_returns
def max_dict(d):
max_key = None
max_value = float('-inf')
for key, value in d.items():
if value > max_value:
max_value = value
max_key = key
return max_key, max_value
if __name__ == '__main__':
grid = negative_grid(step_cost=-0.1)
print("Rewards:")
print_values(grid.rewards, grid)
policy = {}
for s in grid.actions.keys():
policy[s] = np.random.choice(list(grid.actions[s]))
Q = {}
returns = {}
states = grid.all_states()
for s in grid.actions.keys():
Q[s] = {}
for a in grid.actions[s]:
Q[s][a] = 0
returns[(s, a)] = []
deltas = []
for i in range(2000):
if i % 100 == 0:
print(i)
biggest_change = 0
states_actions_returns = play_one_episode(grid, policy)
visited_state_action_pairs = set()
for s, a, G in states_actions_returns:
sa = (s, a)
if sa not in visited_state_action_pairs:
old_q = Q[s][a]
returns[sa].append(G)
Q[s][a] = np.mean(returns[sa])
biggest_change = max(biggest_change, np.abs(old_q - Q[s][a]))
visited_state_action_pairs.add(sa)
deltas.append(biggest_change)
for s in policy.keys():
policy[s] = max_dict(Q[s])[0]
# plt.plot(deltas)
# plt.show()
print("Final policy:")
print_policy(policy, grid)
V = {}
for s, Qs in Q.items():
V[s] = max_dict(Q[s])[1]
print("Final values:")
print_values(V, grid)
| mit |
kashif/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
ssahli/Python-Neural-Network | Demo.py | 1 | 1716 | import matplotlib.pyplot as plt
import pylab
import numpy as np
from sklearn import datasets
from NeuralNet import NeuralNetwork
# Tunable parameters. Go nuts.
HIDDEN_NODES = 128
LEARNING_RATE = 0.3
ITERATIONS = 50
VIEW_EXAMPLES = True
VIEW_PLOT = True
'''
Lets view a few examples from the original dataset.
source of code: scikit-learn.org
'''
if VIEW_EXAMPLES == True:
digits = datasets.load_digits()
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:10]):
plt.subplot(4, 3, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
pylab.show()
'''
Load the data. For this demo, we're using sklearn's digits dataset
Digits are 8x8 pixel images. Each row is one image, in a linear format,
where columns 65-74 correspond to one hot encoded responses representing
digits 0 through 9. 1797 rows 74 columns
'''
data = np.loadtxt("transformed.csv", delimiter = ',')
m = len(data)
# Split the data into training set and test set.
train_set = data[:(3*m//4),:]
test_set = data[m//4:,:]
# Instantiate a new neural network: 64 input nodes, HIDDEN_NODES hidden nodes, 10 output nodes.
NN = NeuralNetwork(64,HIDDEN_NODES,10,LEARNING_RATE,ITERATIONS)
# Train on the training set, test on the test set. The test() function
# will print out the percent correctness on the test set.
errors = NN.train(train_set)
NN.test(test_set)
# Plot the error curve
if VIEW_PLOT == True:
plt.plot(errors)
plt.title("Average Error Per Iteration On Training Set")
plt.xlabel("Iteration")
plt.ylabel("Average Error")
pylab.show()
| gpl-3.0 |
lin-credible/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
CompPhysics/MachineLearning | doc/src/SupportVMachines/Programs/xgcancer.py | 2 | 1413 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
import scikitplot as skplt
import xgboost as xgb
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
xg_clf = xgb.XGBClassifier(max_depth = 4, n_estimators = 200)
xg_clf.fit(X_train_scaled,y_train)
y_pred = xg_clf.predict(X_test_scaled)
print("Test set accuracy with XGBoost and scaled data: {:.2f}".format(xg_clf.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = xg_clf.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
plt.show()
y_probas = xg_clf.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
plt.show()
xgb.plot_tree(xg_clf,num_trees=0)
plt.rcParams['figure.figsize'] = [50, 10]
plt.show()
xgb.plot_importance(xg_clf)
plt.rcParams['figure.figsize'] = [5, 5]
plt.show()
| cc0-1.0 |
pradyu1993/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 3 | 1765 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print lasso
print "r^2 on test data : %f" % r2_score_lasso
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, rho=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print enet
print "r^2 on test data : %f" % r2_score_enet
pl.plot(enet.coef_, label='Elastic net coefficients')
pl.plot(lasso.coef_, label='Lasso coefficients')
pl.plot(coef, '--', label='original coefficients')
pl.legend(loc='best')
pl.title("Lasso R^2: %f, Elastic Net R^2: %f" % (r2_score_lasso,
r2_score_enet))
pl.show()
| bsd-3-clause |
DGrady/pandas | pandas/io/packers.py | 4 | 27509 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas._libs.tslib import NaTType
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
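    Examples
    --------
    A minimal round-trip sketch (assumes the function is exposed as
    ``pandas.to_msgpack``, alongside ``pandas.read_msgpack``):
    >>> import pandas as pd
    >>> df = pd.DataFrame({'a': [1, 2, 3]})
    >>> packed = pd.to_msgpack(None, df)  # bytes, since no path is given
    >>> pd.read_msgpack(packed).equals(df)
    True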
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
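    Examples
    --------
    A minimal sketch, reading back an object written with ``to_msgpack``
    (assumes the top-level ``pandas`` entry points):
    >>> import pandas as pd
    >>> packed = pd.to_msgpack(None, pd.Series([1, 2, 3]))
    >>> pd.read_msgpack(packed).tolist()
    [1, 2, 3]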
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
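# --- Illustrative sketch (not part of the original module) -------------------
# convert()/unconvert() above round-trip a numpy array through a raw byte
# payload, optionally compressed. This helper only demonstrates that
# relationship and assumes the module-level `compressor` is still None
# (i.e. no compression has been requested).
def _convert_roundtrip_example():
    arr = np.arange(6, dtype=np.int64)
    payload = convert(arr)                      # ExtType(0, raw bytes)
    restored = unconvert(payload, arr.dtype)    # back to an ndarray
    return np.array_equal(restored, arr)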
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64, NaTType)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'freq': freq,
u'tz': tz}
if isinstance(obj, NaTType):
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']]._from_ordinals(data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=pd_dtype,
name=obj[u'name'])
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = _safe_reshape(unconvert(
b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']), b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
            except (ValueError, TypeError):
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
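# --- Illustrative sketch (not part of the original module) -------------------
# pack()/unpack() wire the encode()/decode() hooks above into msgpack. A
# pandas Timestamp survives the round trip because encode() maps it to a
# tagged dict and decode() rebuilds it; the example value is arbitrary.
def _pack_unpack_example():
    ts = Timestamp('2014-01-01', tz='UTC')
    packed = pack(ts)                                     # bytes via encode()
    unpacked = list(unpack(compat.BytesIO(packed)))[0]    # rebuilt via decode()
    return unpacked == ts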
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
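# --- Illustrative sketch (not part of the original module) -------------------
# read_msgpack(..., iterator=True) returns the Iterator defined above, which
# yields every object that was appended to a multi-object msgpack file and
# closes the file when iteration finishes. The file name is hypothetical.
def _iterate_msgpack_example(path='appended.msg'):
    return [obj for obj in read_msgpack(path, iterator=True)]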
| bsd-3-clause |
ajerneck/thatsfordinner | frontend/app/tmp.py | 1 | 2982 | import collections
import psycopg2
import itertools
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine("postgresql+psycopg2://explore:Ln2bOYAVCG6utNUSaSZaIVMH@localhost/explore")
#con = psycopg2.connect(host='localhost', dbname='explore', user='explore', password='Ln2bOYAVCG6utNUSaSZaIVMH')
ww = pd.read_sql_table('all_word_probs', engine)
xx = ww.sort(['label','prob']).groupby('label').tail(20)
## get the 10 most probable within each group.
ws = xx[xx['label']==0]['word'].values
## loop over words, filter out those that are in other bigsams
wss = [w.split() for w in ws]
bs = []
us = []
for w in wss:
if len(w) > 1:
bs.append(w)
else:
us.append(w)
keep = []
for u in us:
hits = [b for b in bs if u[0] in b]
if hits == []:
keep.append(u)
## this works.
keep = []
for w in itertools.chain(*wss):
print w
if w not in keep:
keep.append(w)
## TODO: sum up the probabilities:
## do nlp on words to identify nouns.
## do decorate, sort, undecorate
ws = xx[xx['label']==0][['word','prob']]
ks = collections.defaultdict(float)
for row in ws.values.tolist():
    print row
    words = row[0].split()
    prob = row[1]
    for w in words:
        ks[w] += prob
k = collections.defaultdict(float)
for w in itertools.chain(*wss):
    ## best guess at the unfinished accumulation: sum each word's probability
    k[w] += ks[w]
k = dict.fromkeys(itertools.chain(*wss), 0)
x = {}
import itertools
dict.fromkeys(itertools.chain(*wss))
ws = xx[xx['label']==0][['word','prob']]
ws['word'].values
ws['word'].values
wss
bs = [w for w in wss if len(w)>1]
## for each unigram, test containment against the bigrams,
## e.g. 'low' in 'low chicken' -- this is what the us/bs loop above does
# cur = con.cursor()
# cur.execute('select * from all_word_probs order by label')
# ww = cur.fetchall()
gen_recipes = collections.defaultdict(list)
for topic, group in ww.groupby('label'):
# print topic
# print [(np.random.choice(group['word'], size=5, p=group['prob'])) for _ in range(0,2)]
x = [np.random.choice(group['word'], size=5, p=group['prob']).tolist() for i in range(0, 2)]
gen_recipes[topic] += [map(lambda i: ', '.join(i), x)]
print gen_recipes
## extract most probable words for each topic.
con = engine.raw_connection()
cur = con.cursor()
cur.execute('SELECT * FROM word_probs order by topic, prob desc;')
word_probs = cur.fetchall()
word_data = collections.defaultdict(list)
for row in word_probs:
word_data[row[2]] += [row[3]]
for k,v in word_data.items():
word_data[k] = ', '.join(v)
## extract most probable documents for each topic.
cur.execute("SELECT doc_prob.topic, ingredient_txt, image, url, clean_recipes.title, prob FROM doc_prob, clean_recipes WHERE doc_prob.recipe_key=clean_recipes.key ORDER BY topic, rank;")
doc_probs = cur.fetchall()
doc_data = collections.defaultdict(list)
for row in doc_probs:
doc_data[row[0]] += [{'ingredient': row[1], 'image':row[2], 'url':row[3], 'title':row[4]}]
topics = doc_data.keys()
## TODO:
# extract max topic assignment for each topic
## cur.execute('SELECT topic, ingredient_txt, image, url, title')
## topic_docs =
topic_data = collections.defaultdict(dict)
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/kernel_ridge.py | 12 | 7382 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number. Set to "precomputed" in
order to pass a precomputed kernel matrix to the estimator
methods instead of samples.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction. If
kernel == "precomputed" this is instead the precomputed
training matrix, shape = [n_samples, n_samples].
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
sklearn.linear_model.Ridge:
Linear ridge regression.
sklearn.svm.SVR:
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data. If kernel == "precomputed" this is instead
a precomputed kernel matrix, shape = [n_samples,
n_samples].
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples. If kernel == "precomputed" this is instead a
precomputed kernel matrix, shape = [n_samples,
n_samples_fitted], where n_samples_fitted is the number of
samples used in the fitting for this estimator.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
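# --- Illustrative sketch (not part of this scikit-learn module) --------------
# fit() above reduces, for an unweighted single-target problem with a scalar
# alpha, to solving (K + alpha * I) dual_coef = y in closed form. The helper
# below re-derives those dual coefficients with plain numpy, purely to make
# the algebra concrete; predictions are then np.dot(K_test, dual_coef).
def _naive_kernel_ridge_fit(K, y, alpha=1.0):
    n_samples = K.shape[0]
    return np.linalg.solve(K + alpha * np.eye(n_samples), y)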
| bsd-3-clause |
equialgo/scikit-learn | sklearn/setup.py | 69 | 3201 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension('_isotonic',
sources=['_isotonic.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
maybe_cythonize_extensions(top_path, config)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features, while staying the same,
vary in amplitude over time. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
466152112/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor boosted 299 times (300 decision trees in total) is compared with a
single decision tree regressor. As the number of boosts increases, the
regressor can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
alperyeg/elephant | elephant/current_source_density_src/icsd.py | 9 | 35175 | # -*- coding: utf-8 -*-
'''
py-iCSD toolbox!
Translation of the core functionality of the CSDplotter MATLAB package
to python.
The methods were originally developed by Klas H. Pettersen, as described in:
Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
Current-source density estimation based on inversion of electrostatic forward
solution: Effects of finite extent of neuronal activity and conductivity
discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
30 June 2006, Pages 116-133, ISSN 0165-0270,
http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
The methods themselves are implemented as callable subclasses of the base
CSD class object, which sets some common attributes,
and a basic function for calculating the iCSD, and a generic spatial filter
implementation.
The raw- and filtered CSD estimates are returned as Quantity arrays.
Requires a pylab environment to work, i.e. numpy+scipy+matplotlib, with the
addition of quantities (http://pythonhosted.org/quantities) and
neo (https://pythonhosted.org/neo).
Original implementation from CSDplotter-0.1.1
(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
Written by:
- [email protected], 2010,
- [email protected], 2015-2016
'''
import numpy as np
import scipy.integrate as si
import scipy.signal as ss
import quantities as pq
class CSD(object):
'''Base iCSD class'''
def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
'''Initialize parent class iCSD
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps)
f_type : str
type of spatial filter, must be a scipy.signal filter design method
f_order : list
settings for spatial filter, arg passed to filter design function
'''
self.name = 'CSD estimate parent class'
self.lfp = lfp
self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
self.f_type = f_type
self.f_order = f_order
def get_csd(self, ):
'''
Perform the CSD estimate from the LFP and forward matrix F, i.e as
CSD=F**-1*LFP
Arguments
---------
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.linalg.solve(self.f_matrix, self.lfp)
return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
def filter_csd(self, csd, filterfunction='convolve'):
'''
Spatial filtering of the CSD estimate, using an N-point filter
Arguments
---------
csd : np.ndarrray * quantity.Quantity
Array with the csd estimate
filterfunction : str
'filtfilt' or 'convolve'. Apply spatial filter using
scipy.signal.filtfilt or scipy.signal.convolve.
'''
if self.f_type == 'gaussian':
            try:
                assert(len(self.f_order) == 2)
            except AssertionError:
                raise AssertionError('filter order f_order must be a tuple '
                                     'of length 2')
        else:
            try:
                assert(self.f_order > 0 and isinstance(self.f_order, int))
            except AssertionError:
                raise AssertionError('Filter order must be int > 0!')
        try:
            assert(filterfunction in ['filtfilt', 'convolve'])
        except AssertionError:
            raise AssertionError("{} not equal to 'filtfilt' or "
                                 "'convolve'".format(filterfunction))
if self.f_type == 'boxcar':
num = ss.boxcar(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'hamming':
num = ss.hamming(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'triangular':
num = ss.triang(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'gaussian':
num = ss.gaussian(self.f_order[0], self.f_order[1])
denom = np.array([num.sum()])
elif self.f_type == 'identity':
num = np.array([1.])
denom = np.array([1.])
else:
            raise ValueError('%s Wrong filter type!' % self.f_type)
num_string = '[ '
for i in num:
num_string = num_string + '%.3f ' % i
num_string = num_string + ']'
denom_string = '[ '
for i in denom:
denom_string = denom_string + '%.3f ' % i
denom_string = denom_string + ']'
print(('discrete filter coefficients: \nb = {}, \
\na = {}'.format(num_string, denom_string)))
if filterfunction == 'filtfilt':
return ss.filtfilt(num, denom, csd, axis=0) * csd.units
elif filterfunction == 'convolve':
csdf = csd / csd.units
for i in range(csdf.shape[1]):
csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
return csdf * csd.units
class StandardCSD(CSD):
'''
Standard CSD method with and without Vaknin electrodes
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize standard CSD method class with & without Vaknin electrodes.
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m, must be monotonously increasing
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S/m
vaknin_el : bool
flag for using method of Vaknin to endpoint electrodes
Defaults to True
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
zeros_ddc = np.zeros_like(diff_diff_coord)
try:
assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
except AssertionError as ae:
print('coord_electrode not monotonously varying')
raise ae
if self.vaknin_el:
# extend lfps array by duplicating potential at endpoint contacts
if lfp.ndim == 1:
self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
else:
self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
self.lfp[0, ] = lfp[0, ]
self.lfp[1:-1, ] = lfp
self.lfp[-1, ] = lfp[-1, ]
else:
self.lfp = lfp
self.name = 'Standard CSD method'
self.coord_electrode = coord_electrode
self.f_inv_matrix = self.get_f_inv_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.vaknin_el = kwargs.pop('vaknin_el', True)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_inv_matrix(self):
'''Calculate the inverse F-matrix for the standard CSD method'''
h_val = abs(np.diff(self.coord_electrode)[0])
f_inv = -np.eye(self.lfp.shape[0])
# Inner matrix elements is just the discrete laplacian coefficients
for j in range(1, f_inv.shape[0] - 1):
f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
return f_inv * -self.sigma / h_val
def get_csd(self):
'''
Perform the iCSD calculation, i.e: iCSD=F_inv*LFP
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
# `np.dot()` does not return correct units, so the units of `csd` must
# be assigned manually
csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
csd = csd.magnitude * csd_units
return csd
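# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal StandardCSD run on synthetic data; the channel count, contact
# spacing and LFP values are invented purely for demonstration. The __main__
# block at the bottom of this file shows the full workflow on real test data.
def _standard_csd_example():
    lfp = np.random.randn(16, 200) * 1E-6 * pq.V    # 16 channels, 200 samples, in V
    z = np.linspace(100E-6, 1600E-6, 16) * pq.m     # evenly spaced contact depths
    csd_obj = StandardCSD(lfp, z, sigma=0.3 * pq.S / pq.m)
    raw_csd = csd_obj.get_csd()
    return csd_obj.filter_csd(raw_csd)              # spatially smoothed CSD estimate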
class DeltaiCSD(CSD):
'''
delta-iCSD method
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize the delta-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
diamater of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not continously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'delta-iCSD method'
self.coord_electrode = coord_electrode
# initialize F- and iCSD-matrices
self.f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size))
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix'''
f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size)) * self.coord_electrode.units
for j in range(self.coord_electrode.size):
for i in range(self.coord_electrode.size):
f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
self.coord_electrode[i])**2 +
(self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
self.coord_electrode[i])) +
(self.sigma - self.sigma_top) / (self.sigma +
self.sigma_top) *
(np.sqrt((self.coord_electrode[j] +
self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
abs(self.coord_electrode[j] + self.coord_electrode[i])))
f_matrix /= (2 * self.sigma)
return f_matrix
class StepiCSD(CSD):
'''step-iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing step-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float or np.ndarray * quantity.Quantity
diameter(s) of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
h : float or np.ndarray * quantity.Quantity
assumed thickness of the source cylinders at all or each contact
Defaults to np.ones(15) * 100E-6 * pq.m
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not continously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
try:
assert(self.h.size == 1 or self.h.size == coord_electrode.size)
if self.h.size == coord_electrode.size:
assert(np.all(self.h > 0 * self.h.units))
except AssertionError as ae:
print('h must be scalar or of same shape as coord_electrode')
raise ae
if self.h.size == 1:
self.h = np.ones(coord_electrode.size) * self.h
self.name = 'step-iCSD method'
self.coord_electrode = coord_electrode
# compute forward-solution matrix
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate F-matrix for step iCSD method'''
el_len = self.coord_electrode.size
f_matrix = np.zeros((el_len, el_len))
for j in range(el_len):
for i in range(el_len):
lower_int = self.coord_electrode[i] - self.h[j] / 2
if lower_int < 0:
lower_int = self.h[j].units
upper_int = self.coord_electrode[i] + self.h[j] / 2
# components of f_matrix object
f_cyl0 = si.quad(self._f_cylinder,
a=lower_int, b=upper_int,
args=(float(self.coord_electrode[j]),
float(self.diam[j]),
float(self.sigma)),
epsabs=self.tol)[0]
f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
args=(-float(self.coord_electrode[j]),
float(self.diam[j]), float(self.sigma)),
epsabs=self.tol)[0]
# method of images coefficient
mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
f_matrix[j, i] = f_cyl0 + mom * f_cyl1
# assume si.quad trash the units
return f_matrix * self.h.units**2 / self.sigma.units
def _f_cylinder(self, zeta, z_val, diam, sigma):
'''function used by class method'''
f_cyl = 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
return f_cyl
class SplineiCSD(CSD):
'''spline iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing spline-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
diamater of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
num_steps : int
number of data points for the spatially upsampled LFP/CSD data
Defaults to 200
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not continously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
except AssertionError as ae:
print('diam must be scalar or of same shape as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'spline-iCSD method'
self.coord_electrode = coord_electrode
# compute stuff
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.num_steps = kwargs.pop('num_steps', 200)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix for cubic spline iCSD method'''
el_len = self.coord_electrode.size
z_js = np.zeros(el_len + 1)
z_js[:-1] = np.array(self.coord_electrode)
z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
# Define integration matrixes
f_mat0 = np.zeros((el_len, el_len + 1))
f_mat1 = np.zeros((el_len, el_len + 1))
f_mat2 = np.zeros((el_len, el_len + 1))
f_mat3 = np.zeros((el_len, el_len + 1))
# Calc. elements
for j in range(el_len):
for i in range(el_len):
f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
# image technique if conductivity not constant:
if self.sigma != self.sigma_top:
f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1],
float(self.sigma), float(self.diam[j])), \
epsabs=self.tol)[0]
f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
# Calculate the F-matrix
f_matrix = np.eye(el_len + 2)
f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
np.dot(f_mat1, e_mat1) + \
np.dot(f_mat2, e_mat2) + \
np.dot(f_mat3, e_mat3)
return f_matrix * self.coord_electrode.units**2 / self.sigma.units
def get_csd(self):
'''
Calculate the iCSD using the spline iCSD method
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with csd estimate
'''
e_mat = self._calc_e_matrices()
el_len = self.coord_electrode.size
# padding the lfp with zeros on top/bottom
if self.lfp.ndim == 1:
cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
csd = np.zeros(self.num_steps)
else:
cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
np.asarray(self.lfp),
np.zeros(self.lfp.shape[1])))
csd = np.zeros((self.num_steps, self.lfp.shape[1]))
cs_lfp *= self.lfp.units
# CSD coefficients
csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
# The cubic spline polynomial coefficients
a_mat0 = np.dot(e_mat[0], csd_coeff)
a_mat1 = np.dot(e_mat[1], csd_coeff)
a_mat2 = np.dot(e_mat[2], csd_coeff)
a_mat3 = np.dot(e_mat[3], csd_coeff)
# Extend electrode coordinates in both end by min contact interdistance
h = np.diff(self.coord_electrode).min()
z_js = np.zeros(el_len + 2)
z_js[0] = self.coord_electrode[0] - h
z_js[1: -1] = self.coord_electrode
z_js[-1] = self.coord_electrode[-1] + h
# create high res spatial grid
out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
# Calculate iCSD estimate on grid from polynomial coefficients.
i = 0
for j in range(self.num_steps):
if out_zs[j] >= z_js[i + 1]:
i += 1
csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
(out_zs[j] - z_js[i]) + \
a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
a_mat3[i, :] * (out_zs[j] - z_js[i])**3
csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
return csd * csd_unit
def _f_mat0(self, zeta, z_val, sigma, diam):
'''0'th order potential function'''
return 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
'''1'th order potential function'''
return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
'''2'nd order potential function'''
return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
'''3'rd order potential function'''
return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
def _calc_k_matrix(self):
'''Calculate the K-matrix used by to calculate E-matrices'''
el_len = self.coord_electrode.size
h = float(np.diff(self.coord_electrode).min())
c_jm1 = np.eye(el_len + 2, k=0) / h
c_jm1[0, 0] = 0
c_j0 = np.eye(el_len + 2) / h
c_j0[-1, -1] = 0
c_jall = c_j0
c_jall[0, 0] = 1
c_jall[-1, -1] = 1
tjp1 = np.eye(el_len + 2, k=1)
tjm1 = np.eye(el_len + 2, k=-1)
tj0 = np.eye(el_len + 2)
tj0[0, 0] = 0
tj0[-1, -1] = 0
# Defining K-matrix used to calculate e_mat1-3
return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
2 * np.dot(c_jm1, tj0) +
2 * c_jall +
np.dot(c_j0, tjp1)),
3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
np.dot(np.dot(c_jm1, c_jm1), tjm1) +
np.dot(np.dot(c_j0, c_j0), tjp1) -
np.dot(np.dot(c_j0, c_j0), tj0)))
def _calc_e_matrices(self):
'''Calculate the E-matrices used by cubic spline iCSD method'''
el_len = self.coord_electrode.size
# expanding electrode grid
h = float(np.diff(self.coord_electrode).min())
# Define transformation matrices
c_mat3 = np.eye(el_len + 1) / h
# Get K-matrix
k_matrix = self._calc_k_matrix()
# Define matrixes for C to A transformation:
tja = np.eye(el_len + 2)[:-1, ]
tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
# Define spline coefficients
e_mat0 = tja
e_mat1 = np.dot(tja, k_matrix)
e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
return e_mat0, e_mat1, e_mat2, e_mat3
if __name__ == '__main__':
from scipy.io import loadmat
import matplotlib.pyplot as plt
#loading test data
test_data = loadmat('test_data.mat')
#prepare lfp data for use, by changing the units to SI and append quantities,
#along with electrode geometry, conductivities and assumed source geometry
lfp_data = test_data['pot1'] * 1E-6 * pq.V # [uV] -> [V]
z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m # [m]
diam = 500E-6 * pq.m # [m]
h = 100E-6 * pq.m # [m]
sigma = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
sigma_top = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
# Input dictionaries for each method
delta_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
step_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'h' : h, # source thickness
'sigma' : sigma,
'sigma_top' : sigma,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : 201, # Spatial CSD upsampling to N steps
'tol' : 1E-12,
'f_type' : 'gaussian',
'f_order' : (20, 5),
}
std_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
#Create the different CSD-method class instances. We use the class methods
#get_csd() and filter_csd() below to get the raw and spatially filtered
#versions of the current-source density estimates.
csd_dict = dict(
delta_icsd = DeltaiCSD(**delta_input),
step_icsd = StepiCSD(**step_input),
spline_icsd = SplineiCSD(**spline_input),
std_csd = StandardCSD(**std_input),
)
#plot
for method, csd_obj in list(csd_dict.items()):
fig, axes = plt.subplots(3,1, figsize=(8,8))
#plot LFP signal
ax = axes[0]
im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
cb = plt.colorbar(im, ax=ax)
cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
ax.set_xticklabels([])
ax.set_title('LFP')
ax.set_ylabel('ch #')
#plot raw csd estimate
csd = csd_obj.get_csd()
ax = axes[1]
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name)
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_xticklabels([])
ax.set_ylabel('ch #')
#plot spatially filtered csd estimate
ax = axes[2]
csd = csd_obj.filter_csd(csd)
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name + ', filtered')
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_ylabel('ch #')
ax.set_xlabel('timestep')
plt.show()
| bsd-3-clause |
mgahsan/QuantEcon.py | quantecon/models/solow/model.py | 7 | 38654 | r"""
======================
The Solow Growth Model
======================
The following summary of the [solow1956] model of economic growth
largely follows [romer2011].
Assumptions
===========
The production function
----------------------------------------------
The [solow1956] model of economic growth focuses on the behavior of four
variables: output, `Y`, capital, `K`, labor, `L`, and knowledge (or
technology or the ``effectiveness of labor''), `A`. At each point in
time the economy has some amounts of capital, labor, and knowledge that
can be combined to produce output according to some production function,
`F`.
.. math::
Y(t) = F(K(t), A(t)L(t))
where `t` denotes time.
The evolution of the inputs to production
-----------------------------------------
The initial levels of capital, :math:`K_0`, labor, :math:`L_0`, and
technology, :math:`A_0`, are taken as given. Labor and technology are
assumed to grow at constant rates:
.. math::
\dot{A}(t) = gA(t)
\dot{L}(t) = nL(t)
where the rate of technological progress, `g`, and the population growth
rate, `n`, are exogenous parameters.
Output is divided between consumption and investment. The fraction of
output devoted to investment, :math:`0 < s < 1`, is exogenous and
constant. One unit of output devoted to investment yields one unit of
new capital. Capital is assumed to depreciate at a rate :math:`0\le
\delta`. Thus aggregate capital stock evolves according to
.. math::
\dot{K}(t) = sY(t) - \delta K(t).
Although no restrictions are placed on the rates of technological
progress and population growth, the sum of `g`, `n`, and :math:`\delta`
is assumed to be positive.
The dynamics of the model
=========================
Because the economy is growing over time (due to exogenous technological
progress and population growth) it is useful to focus on the behavior of
capital stock per unit of effective labor, :math:`k\equiv K/AL`.
Applying the chain rule to the equation of motion for capital stock
yields (after a bit of algebra!) an equation of motion for capital stock
per unit of effective labor.
.. math::
\dot{k}(t) = s f(k) - (g + n + \delta)k(t)
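For example, with Cobb-Douglas production, :math:`f(k) = k^{\alpha}` with
:math:`0 < \alpha < 1`, setting :math:`\dot{k}(t) = 0` yields the closed-form
steady state
.. math::
k^* = \bigg(\frac{s}{g + n + \delta}\bigg)^{\frac{1}{1-\alpha}}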
References
==========
.. [romer2011] D. Romer. *Advanced Macroeconomics, 4th edition*, McGraw-Hill, 2011.
.. [solow1956] R. Solow. *A contribution to the theory of economic growth*, Quarterly Journal of Economics, 70(1):64-95, 1956.
@author : David R. Pugh
@date : 2014-11-27
"""
from __future__ import division
import collections
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
import sympy as sym
from ... import ivp
from . import impulse_response
# declare key variables for the model
t, X = sym.symbols('t'), sym.DeferredVector('X')
A, k, K, L, Y = sym.symbols('A, k, K, L, Y')
# declare required model parameters
g, n, s, delta = sym.symbols('g, n, s, delta')
class Model(object):
__intensive_output = None
__mpk = None
__numeric_jacobian = None
__numeric_solow_residual = None
__numeric_system = None
_modules = [{'ImmutableMatrix': np.array}, "numpy"]
_required_params = ['g', 'n', 's', 'delta', 'A0', 'L0']
def __init__(self, output, params):
"""
Create an instance of the Solow growth model.
Parameters
----------
output : sym.Basic
Symbolic expression defining the aggregate production
function.
params : dict
Dictionary of model parameters.
"""
self.irf = impulse_response.ImpulseResponse(self)
self.output = output
self.params = params
def __repr__(self):
"""Machine readable summary of a Model instance."""
return self.__str__()
def __str__(self):
"""Human readable summary of a Model instance."""
m = """
Solow (1956) model of economic growth:
- Output : {Y}
- A0 (initial level of technology) : {A0:g}
- L0 (initial amount of available labor) : {L0:g}
- g (growth rate of technology) : {g:g}
- n (growth rate of the labor force) : {n:g}
- s (savings rate) : {s:g}
- delta (depreciation rate of physical capital) : {delta:g}
"""
formatted_str = dedent(m.format(Y=self.output,
A0=self.params['A0'],
L0=self.params['L0'],
g=self.params['g'],
n=self.params['n'],
s=self.params['s'],
delta=self.params['delta']))
return formatted_str
@property
def _intensive_output(self):
"""
:getter: Return vectorized symbolic intensive aggregate production.
:type: function
"""
if self.__intensive_output is None:
args = [k] + sym.symbols(list(self.params.keys()))
self.__intensive_output = sym.lambdify(args, self.intensive_output,
self._modules)
return self.__intensive_output
@property
def _mpk(self):
"""
:getter: Return vectorized symbolic marginal product capital.
:type: function
"""
if self.__mpk is None:
args = [k] + sym.symbols(list(self.params.keys()))
self.__mpk = sym.lambdify(args, self.marginal_product_capital,
self._modules)
return self.__mpk
@property
def _numeric_jacobian(self):
"""
Vectorized, numpy-aware function defining the Jacobian matrix of
partial derivatives.
:getter: Return vectorized Jacobian matrix of partial derivatives.
:type: function
"""
if self.__numeric_jacobian is None:
self.__numeric_jacobian = sym.lambdify(self._symbolic_args,
self._symbolic_jacobian,
self._modules)
return self.__numeric_jacobian
@property
def _numeric_solow_residual(self):
"""
Vectorized, numpy-aware function defining the Solow residual.
:getter: Return vectorized symbolic Solow residual.
:type: function
"""
if self.__numeric_solow_residual is None:
tmp_args = [Y, K, L] + sym.symbols(list(self.params.keys()))
self.__numeric_solow_residual = sym.lambdify(tmp_args,
self.solow_residual,
self._modules)
return self.__numeric_solow_residual
@property
def _numeric_system(self):
"""
Vectorized, numpy-aware function defining the system of ODEs.
:getter: Return vectorized symbolic system of ODEs.
:type: function
"""
if self.__numeric_system is None:
self.__numeric_system = sym.lambdify(self._symbolic_args,
self._symbolic_system,
self._modules)
return self.__numeric_system
@property
def _symbolic_args(self):
"""
List of symbolic arguments used in constructing vectorized
versions of _symbolic_system and _symbolic_jacobian.
:getter: Return list of symbolic arguments.
:type: list
"""
args = [t, X] + sym.symbols(list(self.params.keys()))
return args
@property
def _symbolic_jacobian(self):
"""
Symbolic Jacobian matrix for the system of ODEs.
:getter: Return the symbolic Jacobian matrix.
:type: sym.MutableDenseMatrix
"""
N = self._symbolic_system.shape[0]
return self._symbolic_system.jacobian([X[i] for i in range(N)])
@property
def _symbolic_system(self):
"""
Symbolic matrix defining the system of ODEs.
:getter: Return the matrix defining the system of ODEs.
:type: sym.MutableDenseMatrix
"""
change_of_vars = {k: X[0]}
return sym.Matrix([self.k_dot]).subs(change_of_vars)
@property
def effective_depreciation_rate(self):
"""
Effective depreciation rate for capital stock (per unit
effective labor).
:getter: Return the current effective depreciation rate.
:type: float
Notes
-----
The effective depreciation rate of physical capital takes into
account both technological progress and population growth, as
well as physical depreciation.
"""
return sum(self.params[key] for key in ['g', 'n', 'delta'])
@property
def intensive_output(self):
r"""
Symbolic expression for the intensive form of aggregate
production.
:getter: Return the current intensive production function.
:type: sym.Basic
Notes
-----
The assumption of constant returns to scale allows us to work
with the intensive form of the aggregate production function,
`F`. Defining :math:`c=1/AL` one can write
.. math::
F\bigg(\frac{K}{AL}, 1\bigg) = \frac{1}{AL}F(K, AL)
Defining :math:`k=K/AL` and :math:`y=Y/AL` to be capital per
unit effective labor and output per unit effective labor,
respectively, the intensive form of the production function can
be written as
.. math::
y = f(k).
Additional assumptions are that `f` satisfies :math:`f(0)=0`, is
increasing and concave (i.e., :math:`f'(k) > 0, f''(k) < 0`), and satisfies the
Inada conditions:
.. math::
:type: eqnarray
\lim_{k \rightarrow 0} f'(k) &=& \infty \\
\lim_{k \rightarrow \infty} f'(k) &=& 0
The [inada1964]_ conditions are sufficient (but not necessary!)
to ensure that the time path of capital per effective worker
does not explode.
.. [inada1964] K. Inada. *Some structural characteristics of Turnpike Theorems*, Review of Economic Studies, 31(1):43-58, 1964.
"""
return self.output.subs({'A': 1.0, 'K': k, 'L': 1.0})
@property
def ivp(self):
r"""
Initial value problem
:getter: Return an instance of the ivp.IVP class representing
the Solow model.
:type: ivp.IVP
Notes
-----
The Solow model can be formulated as an initial value
problem (IVP) as follows.
.. math::
\dot{k}(t) = sf(k(t)) - (g + n + \delta)k(t),\ t\ge t_0,\ k(t_0) = k_0
The solution to this IVP is a function :math:`k(t)` describing
the time path of capital stock (per unit effective labor).
"""
tmp_ivp = ivp.IVP(self._numeric_system, self._numeric_jacobian)
tmp_ivp.f_params = tuple(self.params.values())
tmp_ivp.jac_params = tuple(self.params.values())
return tmp_ivp
@property
def k_dot(self):
r"""
Symbolic expression for the equation of motion for capital (per
unit effective labor).
:getter: Return the current equation of motion for capital (per
unit effective labor).
:type: sym.Basic
Notes
-----
Because the economy is growing over time due to technological
progress, `g`, and population growth, `n`, it makes sense to
focus on the capital stock per unit effective labor, `k`, rather
than aggregate physical capital, `K`. Since, by definition,
:math:`k=K/AL`, we can apply the chain rule to the time derivative
of `k`.
.. math::
:type: eqnarray
\dot{k}(t) =& \frac{\dot{K}(t)}{A(t)L(t)} - \frac{K(t)}{[A(t)L(t)]^2}\bigg[\dot{A}(t)L(t) + \dot{L}(t)A(t)\bigg] \\
=& \frac{\dot{K}(t)}{A(t)L(t)} - \bigg(\frac{\dot{A}(t)}{A(t)} + \frac{\dot{L}(t)}{L(t)}\bigg)\frac{K(t)}{A(t)L(t)}
By definition, :math:`k=K/AL`, and by assumption
:math:`\dot{A}/A` and :math:`\dot{L}/L` are `g` and `n`
respectively. Aggregate capital stock evolves according to
.. math::
\dot{K}(t) = sF(K(t), A(t)L(t)) - \delta K(t).
Substituting these facts into the above equation yields the
equation of motion for capital stock (per unit effective labor).
.. math::
:type: eqnarray
\dot{k}(t) =& \frac{sF(K(t), A(t)L(t)) - \delta K(t)}{A(t)L(t)} - (g + n)k(t) \\
=& \frac{sY(t)}{A(t)L(t)} - (g + n + \delta)k(t) \\
=& sf(k(t)) - (g + n + \delta)k(t)
"""
return s * self.intensive_output - (g + n + delta) * k
@property
def marginal_product_capital(self):
r"""
Symbolic expression for the marginal product of capital (per
unit effective labor).
:getter: Return the current marginal product of capital (per
unit effective labor).
:type: sym.Basic
Notes
-----
The marginal product of capital is defined as follows:
.. math::
\frac{\partial F(K, AL)}{\partial K} \equiv f'(k)
where :math:`k=K/AL` is capital stock (per unit effective labor)
"""
return sym.diff(self.intensive_output, k)
@property
def output(self):
r"""
Symbolic expression for the aggregate production function.
:getter: Return the current aggregate production function.
:setter: Set a new aggregate production function
:type: sym.Basic
Notes
-----
At each point in time the economy has some amounts of capital,
`K`, labor, `L`, and knowledge (or technology), `A`, that can be
combined to produce output, `Y`, according to some function,
`F`.
.. math::
Y(t) = F(K(t), A(t)L(t))
where `t` denotes time. Note that `A` and `L` are assumed to
enter multiplicatively. Typically `A(t)L(t)` denotes "effective
labor", and technology that enters in this fashion is known as
labor-augmenting or "Harrod neutral."
A key assumption of the model is that the function `F` exhibits
constant returns to scale in capital and labor inputs.
Specifically,
.. math::
F(cK(t), cA(t)L(t)) = cF(K(t), A(t)L(t)) = cY(t)
for any :math:`c \ge 0`.
"""
return self._output
@property
def params(self):
"""
Dictionary of model parameters.
:getter: Return the current dictionary of model parameters.
:setter: Set a new dictionary of model parameters.
:type: dict
Notes
-----
The following parameters are required:
A0: float
Initial level of technology. Must satisfy :math:`A_0 > 0`.
L0: float
Initial amount of available labor. Must satisfy
:math:`L_0 > 0`.
g : float
Growth rate of technology.
n : float
Growth rate of the labor force.
s : float
Savings rate. Must satisfy `0 < s < 1`.
delta : float
Depreciation rate of physical capital. Must satisfy
:math:`0 < \delta`.
Although no restrictions are placed on the rates of
technological progress and population growth, the sum of `g`,
`n`, and :math:`\delta` is assumed to be positive. The user must
also specify any additional model parameters specific to the
chosen aggregate production function.
"""
return self._params
@property
def solow_residual(self):
"""
Symbolic expression for the Solow residual which is used as a
measure of technology.
:getter: Return the symbolic expression.
:type: sym.Basic
"""
return sym.solve(Y - self.output, A)[0]
@property
def speed_of_convergence(self):
r"""
The speed of convergence for the Solow model.
:getter: Return the current speed of convergence.
:type: float
Notes
-----
The following is a derivation for the speed of convergence
:math:`\lambda`:
.. math::
:type: eqnarray
\lambda \equiv -\frac{\partial \dot{k}(k(t))}{\partial k(t)}\bigg|_{k(t)=k^*} =& -[sf'(k^*) - (g + n+ \delta)] \\
=& (g + n+ \delta) - sf'(k^*) \\
=& (g + n + \delta) - (g + n + \delta)\frac{k^*f'(k^*)}{f(k^*)} \\
=& (1 - \alpha_K(k^*))(g + n + \delta)
where the elasticity of output with respect to capital,
:math:`\alpha_K(k)`, is defined as
.. math::
\alpha_K(k) = \frac{kf'(k)}{f(k)}.
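For example, with Cobb-Douglas production, :math:`f(k)=k^{\alpha}`, the
elasticity is constant, :math:`\alpha_K(k)=\alpha`, and the speed of
convergence reduces to :math:`\lambda = (1 - \alpha)(g + n + \delta)`.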
"""
alpha_K = self.evaluate_output_elasticity(self.steady_state)
return (1 - alpha_K) * self.effective_depreciation_rate
@property
def steady_state(self):
r"""
Steady state value of capital stock (per unit effective labor).
:getter: Return the current steady state value.
:type: float
Notes
-----
The steady state value of capital stock (per unit effective
labor), `k`, is defined as the value of `k` that solves
.. math::
0 = sf(k) - (g + n + \delta)k
where `s` is the savings rate, `f(k)` is intensive output, and
:math:`g + n + \delta` is the effective depreciation rate.
"""
lower, upper = 1e-12, 1e12
return self.find_steady_state(lower, upper)
@output.setter
def output(self, value):
"""Set a new production function."""
self._output = self._validate_output(value)
self._clear_cache()
@params.setter
def params(self, value):
"""Set a new parameter dictionary."""
valid_params = self._validate_params(value)
self._params = self._order_params(valid_params)
def _clear_cache(self):
"""Clear cached values."""
self.__intensive_output = None
self.__mpk = None
self.__numeric_jacobian = None
self.__numeric_solow_residual = None
self.__numeric_system = None
@staticmethod
def _order_params(params):
"""Cast a dictionary to an order dictionary."""
return collections.OrderedDict(sorted(params.items()))
def _validate_output(self, output):
"""Validate the production function."""
if not isinstance(output, sym.Basic):
mesg = ("Output must be an instance of {}.".format(sym.Basic))
raise AttributeError(mesg)
elif not ({A, K, L} < output.atoms()):
mesg = ("Output must be an expression of technology, 'A', " +
"capital, 'K', and labor, 'L'.")
raise AttributeError(mesg)
else:
return output
def _validate_params(self, params):
"""Validate the model parameters."""
if not isinstance(params, dict):
mesg = "SolowModel.params must be a dict, not a {}."
raise AttributeError(mesg.format(params.__class__))
elif not set(self._required_params) <= set(params.keys()):
mesg = "One of the required params in {} has not been specified."
raise AttributeError(mesg.format(self._required_params))
elif params['s'] <= 0.0 or params['s'] >= 1.0:
raise AttributeError('Savings rate must be in (0, 1).')
elif params['delta'] <= 0.0 or params['delta'] >= 1.0:
raise AttributeError('Depreciation rate must be in (0, 1).')
elif params['g'] + params['n'] + params['delta'] <= 0.0:
raise AttributeError("Sum of g, n, and delta must be positive.")
elif params['A0'] <= 0.0:
mesg = "Initial value for technology must be strictly positive."
raise AttributeError(mesg)
elif params['L0'] <= 0.0:
mesg = "Initial value for labor supply must be strictly positive."
raise AttributeError(mesg)
else:
return params
def evaluate_actual_investment(self, k):
"""
Return the amount of output (per unit of effective labor)
invested in the production of new capital.
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
actual_inv : array_like (float)
Investment (per unit of effective labor)
"""
actual_inv = self.params['s'] * self.evaluate_intensive_output(k)
return actual_inv
def evaluate_consumption(self, k):
"""
Return the amount of consumption (per unit of effective labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
c : ndarray (float)
Consumption (per unit of effective labor)
"""
c = (self.evaluate_intensive_output(k) -
self.evaluate_actual_investment(k))
return c
def evaluate_effective_depreciation(self, k):
"""
Return the amount of capital stock (per unit of effective labor)
that depreciates due to technological progress, population
growth, and physical depreciation.
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
effective_depreciation : array_like (float)
Amount of depreciated Capital stock (per unit of effective
labor)
"""
effective_depreciation = self.effective_depreciation_rate * k
return effective_depreciation
def evaluate_intensive_output(self, k):
"""
Return the amount of output (per unit of effective labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
y : ndarray (float)
Output (per unit of effective labor)
"""
y = self._intensive_output(k, *self.params.values())
return y
def evaluate_k_dot(self, k):
"""
Return time derivative of capital stock (per unit of effective
labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
k_dot : ndarray (float)
Time derivative of capital stock (per unit of effective
labor).
"""
k_dot = (self.evaluate_actual_investment(k) -
self.evaluate_effective_depreciation(k))
return k_dot
def evaluate_mpk(self, k):
"""
Return marginal product of capital stock (per unit of effective
labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
mpk : ndarray (float)
Marginal product of capital stock (per unit of effective
labor).
"""
mpk = self._mpk(k, *self.params.values())
return mpk
def evaluate_output_elasticity(self, k):
"""
Return elasticity of output with respect to capital stock (per
unit effective labor).
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
alpha_k : array_like (float)
Elasticity of output with respect to capital stock (per unit
effective labor).
Notes
-----
Under the additional assumption that markets are perfectly
competitive, the elasticity of output with respect to capital
stock is equivalent to capital's share of income. Since, under
perfect competition, firms earn zero profits, it must be true that
capital's share and labor's share sum to one.
"""
alpha_k = (k*self.evaluate_mpk(k)) / self.evaluate_intensive_output(k)
return alpha_k
def evaluate_solow_residual(self, Y, K, L):
"""
Return Solow residual.
Parameters
----------
Y, K, L : array_like (float)
Output, capital stock, and labor supply, respectively.
Returns
-------
residual : array_like (float)
Solow residual
"""
residual = self._numeric_solow_residual(Y, K, L, *self.params.values())
assert (residual > 0).all(), "Solow residual should always be positive!"
return residual
def find_steady_state(self, a, b, method='brentq', **kwargs):
"""
Compute the equilibrium value of capital stock (per unit
effective labor).
Parameters
----------
a : float
One end of the bracketing interval [a,b].
b : float
The other end of the bracketing interval [a,b]
method : str (default=`brentq`)
Method to use when computing the steady state. Supported
methods are `bisect`, `brenth`, `brentq`, `ridder`. See
`scipy.optimize` for more details (including references).
kwargs : optional
Additional keyword arguments. Keyword arguments are method
specific see `scipy.optimize` for details.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In
particular, ``r.converged`` is True if the routine
converged.
"""
if method == 'bisect':
result = optimize.bisect(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'brenth':
result = optimize.brenth(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'brentq':
result = optimize.brentq(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'ridder':
result = optimize.ridder(self.evaluate_k_dot, a, b, **kwargs)
else:
mesg = ("Method must be one of : 'bisect', 'brenth', 'brentq', " +
"or 'ridder'.")
raise ValueError(mesg)
return result
def linearized_solution(self, t, k0):
"""
Compute the linearized solution for the Solow model.
Parameters
----------
t : ndarray (shape=(T,))
Array of points at which the solution is desired.
k0 : (float)
Initial condition for capital stock (per unit of effective
labor)
Returns
-------
linearized_traj : ndarray (shape=t.size, 2)
Array representing the linearized solution trajectory.
"""
kt = (self.steady_state + np.exp(-self.speed_of_convergence * t) *
(k0 - self.steady_state))
linearized_traj = np.hstack((t[:, np.newaxis], kt[:, np.newaxis]))
return linearized_traj
def plot_factor_shares(self, ax, Nk=1e3, **new_params):
"""
Plot income/output shares of capital and labor inputs to
production.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
capitals_share_line : matplotlib.lines.Line2D
A Line2D object representing the time path for capital's
share of income.
labors_share_line : matplotlib.lines.Line2D
A Line2D object representing the time path for labor's
share of income.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
capitals_share = self.evaluate_output_elasticity(k_grid)
labors_share = 1 - capitals_share
capitals_share_line, = ax.plot(k_grid, capitals_share, 'r-',
label=r'$\alpha_K(k(t))$')
labors_share_line, = ax.plot(k_grid, labors_share, 'b-',
label=r'$1 - \alpha_K(k(t))$')
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_title('Factor shares', family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1.0, 1.0))
return [capitals_share_line, labors_share_line]
def plot_intensive_output(self, ax, Nk=1e3, **new_params):
"""
Plot intensive form of the aggregate production function.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
intensive_output : matplotlib.lines.Line2D
A Line2D object representing intensive output as a function
of capital stock (per unit effective labor).
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
y_grid = self.evaluate_intensive_output(k_grid)
intensive_output_line, = ax.plot(k_grid, y_grid, 'r-')
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel('$f(k(t))$', family='serif', fontsize=20,
rotation='horizontal')
ax.yaxis.set_label_coords(-0.1, 0.5)
ax.set_title('Output (per unit effective labor)',
family='serif', fontsize=20)
ax.grid(True)
return [intensive_output_line]
def plot_intensive_investment(self, ax, Nk=1e3, **new_params):
"""
Plot actual investment (per unit effective labor) and effective
depreciation. The steady state value of capital stock (per unit
effective labor) balance acual investment and effective
depreciation.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
actual_investment_line : matplotlib.lines.Line2D
A Line2D object representing the level of actual investment
as a function of capital stock (per unit effective labor).
breakeven_investment_line : matplotlib.lines.Line2D
A Line2D object representing the "break-even" level of
investment as a function of capital stock (per unit
effective labor).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
investment.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
actual_investment_grid = self.evaluate_actual_investment(k_grid)
breakeven_investment_grid = self.evaluate_effective_depreciation(k_grid)
ss_investment = self.evaluate_actual_investment(self.steady_state)
actual_investment_line, = ax.plot(k_grid, actual_investment_grid, 'g-',
label='$sf(k(t))$')
breakeven_investment_line, = ax.plot(k_grid, breakeven_investment_grid,
'b-', label='$(g + n + \delta)k(t)$')
ss_line, = ax.plot(self.steady_state, ss_investment, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel('Investment (per unit effective labor)', family='serif',
fontsize=15)
ax.set_title('Output (per unit effective labor)',
family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1.0, 1.0))
return [actual_investment_line, breakeven_investment_line, ss_line]
def plot_phase_diagram(self, ax, Nk=1e3, **new_params):
"""
Plot the model's phase diagram.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
k_dot_line : matplotlib.lines.Line2D
A Line2D object representing the rate of change of capital
stock (per unit effective labor) as a function of its level.
origin_line : matplotlib.lines.Line2D
A Line2D object representing the origin (i.e., locus of
points where k_dot is zero).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
capital stock (per unit effective labor).
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
k_dot_line, = ax.plot(k_grid, self.evaluate_k_dot(k_grid),
color='orange')
origin_line = ax.axhline(0, color='k')
ss_line, = ax.plot(self.steady_state, 0.0, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel('$\dot{k}(t)$', family='serif', fontsize=25,
rotation='horizontal')
ax.yaxis.set_label_coords(-0.1, 0.5)
ax.set_title('Phase diagram', family='serif', fontsize=20)
ax.grid(True)
return [k_dot_line, origin_line, ss_line]
def plot_solow_diagram(self, ax, Nk=1e3, **new_params):
"""
Plot the classic Solow diagram.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
actual_investment_line : matplotlib.lines.Line2D
A Line2D object representing the level of actual investment
as a function of capital stock (per unit effective labor).
breakeven_investment_line : matplotlib.lines.Line2D
A Line2D object representing the "break-even" level of
investment as a function of capital stock (per unit
effective labor).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
investment.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
intensive_output_grid = self.evaluate_intensive_output(k_grid)
actual_investment_grid = self.evaluate_actual_investment(k_grid)
breakeven_investment_grid = self.evaluate_effective_depreciation(k_grid)
ss_investment = self.evaluate_actual_investment(self.steady_state)
intensive_output_line, = ax.plot(k_grid, intensive_output_grid, 'r-',
label='$f(k(t))$')
actual_investment_line, = ax.plot(k_grid, actual_investment_grid, 'g-',
label='$sf(k(t))$')
breakeven_investment_line, = ax.plot(k_grid, breakeven_investment_grid,
'b-', label='$(g + n + \delta)k(t)$')
ss_line, = ax.plot(self.steady_state, ss_investment, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_title('Solow diagram',
family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1, 1))
lines = [intensive_output_line, actual_investment_line,
breakeven_investment_line, ss_line]
return lines
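# The block below is a minimal usage sketch, not part of the library's API
# surface: it assumes Cobb-Douglas technology and purely illustrative
# parameter values ('alpha' is an extra, production-specific parameter).
if __name__ == '__main__':
    alpha = sym.symbols('alpha')
    cobb_douglas = K**alpha * (A * L)**(1 - alpha)
    example_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.01,
                      's': 0.15, 'alpha': 0.33, 'delta': 0.04}
    example_model = Model(output=cobb_douglas, params=example_params)
    # the steady state solves s * f(k) = (g + n + delta) * k
    print(example_model.steady_state)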
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
aerler/GeoPy | src/plotting/archive/lineplots.py | 1 | 9662 | '''
Created on 2014-03-16
some useful plotting functions that take advantage of variable meta data
@author: Andre R. Erler, GPL v3
'''
# external imports
from types import NoneType
from warnings import warn
import numpy as np
import matplotlib as mpl
# import matplotlib.pylab as pyl
# #from mpl_toolkits.axes_grid1 import ImageGrid
# linewidth = .75
# mpl.rc('lines', linewidth=linewidth)
# if linewidth == 1.5: mpl.rc('font', size=12)
# elif linewidth == .75: mpl.rc('font', size=8)
# else: mpl.rc('font', size=10)
# # prevent figures from closing: don't run in interactive mode, or plt.show() will not block
# pyl.ioff()
# internal imports
from utils.signalsmooth import smooth
from plotting.misc import getPlotValues, getFigAx, updateSubplots
from geodata.base import Variable
from geodata.misc import AxisError, ListError
#import pdb
#pdb.set_trace()
def linePlot(varlist, ax=None, fig=None, linestyles=None, varatts=None, legend=None,
xline=None, yline=None, title=None, flipxy=None, xlabel=None, ylabel=None, xlim=None,
ylim=None, lsmooth=False, lprint=False, **kwargs):
''' A function to draw a list of 1D variables into an axes, and annotate the plot based on variable properties. '''
warn('Deprecated function: use Figure or Axes class methods.')
# create axes, if necessary
if ax is None:
if fig is None: fig,ax = getFigAx(1) # single panel
else: ax = fig.axes[0]
# varlist is the list of variable objects that are to be plotted
#print varlist
if isinstance(varlist,Variable): varlist = [varlist]
elif not isinstance(varlist,(tuple,list)) or not all([isinstance(var,Variable) for var in varlist]): raise TypeError
for var in varlist: var.squeeze() # remove singleton dimensions
# linestyles is just a list of line styles for each plot
if isinstance(linestyles,(str,NoneType)): linestyles = [linestyles]*len(varlist)
elif not isinstance(linestyles,(tuple,list)):
if not all([isinstance(linestyles,str) for var in varlist]): raise TypeError
if len(varlist) != len(linestyles): raise ListError("Failed to match linestyles to varlist!")
# varatts are variable-specific attributes that are parsed for special keywords and then passed on to the plot function
if varatts is None: varatts = [dict()]*len(varlist)
elif isinstance(varatts,dict):
tmp = [varatts[var.name] if var.name in varatts else dict() for var in varlist]
if any(tmp): varatts = tmp # if any variable names were found
else: varatts = [varatts]*len(varlist) # assume it is one varatts dict, which will be used for all variables
elif not isinstance(varatts,(tuple,list)): raise TypeError
if not all([isinstance(atts,dict) for atts in varatts]): raise TypeError
# check axis: they need to have only one axes, which has to be the same for all!
if len(varatts) != len(varlist): raise ListError("Failed to match varatts to varlist!")
for var in varlist:
if var.ndim > 1: raise AxisError("Variable '{}' has more than one dimension; consider squeezing.".format(var.name))
elif var.ndim == 0: raise AxisError("Variable '{}' is a scalar; consider display as a line.".format(var.name))
# loop over variables
plts = []; varname = None; varunits = None; axname = None; axunits = None # list of plot handles
for var,linestyle,varatt in zip(varlist,linestyles,varatts):
varax = var.axes[0]
# scale axis and variable values
axe, axunits, axname = getPlotValues(varax, checkunits=axunits, checkname=None)
val, varunits, varname = getPlotValues(var, checkunits=varunits, checkname=None)
# variable and axis scaling is not always independent...
if var.plot is not None and varax.plot is not None:
if 'preserve' in var.plot and 'scalefactor' in varax.plot:
if varax.units != axunits and var.plot.preserve == 'area':
val /= varax.plot.scalefactor
# figure out keyword options
kwatts = kwargs.copy(); kwatts.update(varatt) # join individual and common attributes
if 'label' not in kwatts: kwatts['label'] = var.name # default label: variable name
# N.B.: other scaling behavior could be added here
if lprint: print(varname, varunits, val.mean())
if lsmooth: val = smooth(val)
# figure out orientation
if flipxy: xx,yy = val, axe
else: xx,yy = axe, val
# call plot function
if linestyle is None: plts.append(ax.plot(xx, yy, **kwatts)[0])
else: plts.append(ax.plot(xx, yy, linestyle, **kwatts)[0])
# set axes limits
if isinstance(xlim,(list,tuple)) and len(xlim)==2: ax.set_xlim(*xlim)
elif xlim is not None: raise TypeError
if isinstance(ylim,(list,tuple)) and len(ylim)==2: ax.set_ylim(*ylim)
elif ylim is not None: raise TypeError
# set title
if title is not None:
ax.set_title(title, dict(fontsize='medium'))
pos = ax.get_position()
pos = pos.from_bounds(x0=pos.x0, y0=pos.y0, width=pos.width, height=pos.height-0.03)
ax.set_position(pos)
# set axes labels
if flipxy: xname,xunits,yname,yunits = varname,varunits,axname,axunits
else: xname,xunits,yname,yunits = axname,axunits,varname,varunits
if not xlabel: xlabel = '{0:s} [{1:s}]'.format(xname,xunits) if xunits else '{0:s}'.format(xname)
else: xlabel = xlabel.format(xname,xunits)
if not ylabel: ylabel = '{0:s} [{1:s}]'.format(yname,yunits) if yunits else '{0:s}'.format(yname)
else: ylabel = ylabel.format(yname,yunits)
# a typical custom label that makes use of the units would look like this: 'custom label [{1:s}]',
# where {} will be replaced by the appropriate default units (which have to be the same anyway)
xpad = 2; xticks = ax.get_xaxis().get_ticklabels()
ypad = -2; yticks = ax.get_yaxis().get_ticklabels()
# len(xticks) > 0 is necessary to avoid errors with AxesGrid, which removes invisible tick labels
if len(xticks) > 0 and xticks[-1].get_visible(): ax.set_xlabel(xlabel, labelpad=xpad)
elif len(yticks) > 0 and not title: yticks[0].set_visible(False) # avoid overlap
if len(yticks) > 0 and yticks[-1].get_visible(): ax.set_ylabel(ylabel, labelpad=ypad)
elif len(xticks) > 0: xticks[0].set_visible(False) # avoid overlap
# make monthly ticks
if axname == 'time' and axunits == 'month':
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2)) # ax.minorticks_on()
# add legend
if legend:
legatts = dict()
if ax.get_yaxis().get_label():
legatts['fontsize'] = ax.get_yaxis().get_label().get_fontsize()
if isinstance(legend,dict): legatts.update(legend)
elif isinstance(legend,(int,np.integer,float,np.inexact)): legatts['loc'] = legend
ax.legend(**legatts)
# add orientation lines
if isinstance(xline,(int,np.integer,float,np.inexact)): ax.axhline(y=xline, color='black')
elif isinstance(xline,dict): ax.axhline(**xline)
if isinstance(yline,(int,np.integer,float,np.inexact)): ax.axvline(x=yline, color='black')
elif isinstance(yline,dict): ax.axvline(**yline)
# return handle
return plts
# add common/shared legend to a multi-panel plot
def addSharedLegend(fig, plts=None, legs=None, fontsize=None, **kwargs):
''' add a common/shared legend to a multi-panel plot '''
# complete input
warn('Deprecated function: use Figure or Axes class methods.')
if legs is None: legs = [plt.get_label() for plt in plts]
elif not isinstance(legs, (list,tuple)): raise TypeError
if not isinstance(plts, (list,tuple,NoneType)): raise TypeError
# figure out fontsize and row numbers
fontsize = fontsize or fig.axes[0].get_yaxis().get_label().get_fontsize() # or fig._suptitle.get_fontsize()
nlen = len(plts) if plts else len(legs)
if fontsize > 11: ncols = 2 if nlen == 4 else 3
else: ncols = 3 if nlen == 6 else 4
# make room for legend
leghgt = np.ceil(nlen/ncols) * fontsize + 0.055
ax = fig.add_axes([0, 0, 1,leghgt]) # new axes to hold legend, with some attributes
ax.set_frame_on(False); ax.axes.get_yaxis().set_visible(False); ax.axes.get_xaxis().set_visible(False)
fig = updateSubplots(fig, mode='shift', bottom=leghgt) # shift bottom upwards
# define legend parameters
legargs = dict(loc=10, ncol=ncols, borderaxespad=0., fontsize=fontsize, frameon=True,
labelspacing=0.1, handlelength=1.3, handletextpad=0.3, fancybox=True)
legargs.update(kwargs)
# create legend and return handle
if plts: legend = ax.legend(plts, legs, **legargs)
else: legend = ax.legend(legs, **legargs)
return legend
# plots with error shading
def addErrorPatch(ax, var, err, color=None, axis=None, xerr=True, alpha=0.25, check=False, cap=-1):
from numpy import append, where, isnan
from matplotlib.patches import Polygon
warn('Deprecated function: use Figure or Axes class methods.')
if isinstance(var,Variable):
if axis is None and var.ndim > 1: raise AxisError
x = var.getAxis(axis).getArray()
y = var.getArray();
if isinstance(err,Variable): e = err.getArray()
else: e = err
else:
if axis is None: raise ValueError
y = axis; x = var; e = err
if check:
e = where(isnan(e),0,e)
if cap > 0: e = where(e>cap,0,e)
if xerr:
ix = append(x-e,(x+e)[::-1])
iy = append(y,y[::-1])
else:
ix = append(y,y[::-1])
iy = append(x-e,(x+e)[::-1])
if color is None: raise NotImplementedError # should take color from plot line (variable)
patch = Polygon(list(zip(ix,iy)), alpha=alpha, facecolor=color, edgecolor=color)
ax.add_patch(patch)
return patch
if __name__ == '__main__':
pass | gpl-3.0 |
joshfuchs/ZZCeti_fitting | finegrid.py | 1 | 3310 | """
Written March 2015
@author: Josh T Fuchs
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from intmodels import models #This interpolates the models to a small grid
'''
:DESCRIPTION: Takes the best Teff and log(g) from the fitting to the coarse grid. Sets up ranges and values to interpolate the grid to smaller Teff and log(g) spacing. Calls intmodels.py that does the actual interpolation.
:INPUTS:
bestT: integer, best-fitting Teff from the coarse grid
bestg: integer, best-fitting log(g) from the coarse grid. In format: log(g) = 8.0 as bestg = 800
'''
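#Example invocation from the command line (values are illustrative; they
#match the defaults noted in the comments below):
# python finegrid.py 12500 800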
script, midt, midg = sys.argv
midt = int(midt)#12500
midg = int(midg)#800
#Choose coarse models to import for interpolation. This is for getting finer log(g)
#For model spacings of 250 K and .25 logg
#testt = [midt-500,midt-250,midt,midt+250,midt+500]
#testg = [midg-50,midg-25,midg,midg+25,midg+50]
#Define coarse model spacings of 100 K and .1 logg
testt, testg = [], []
for n in range(51):
testt.append(midt-2500+100*n)
for m in range(11):
testg.append(midg-50+10*m)
#print testt
#print testg
#exit()
#Set up finer grid for log(g)
#Set final number of models you want
numberg = range(21)
gridg = np.empty(len(numberg))
for n in numberg:
gridg[n] = (midg/100.-0.50 + 0.05*n) #############(midg/100.-0.5 + 0.005*n) (midg/100.-0.25+0.05*n)
#Begin iterating over different Teffs to get finer log(g)'s
for i in testt:
print ''
print 'Now starting with Teffs of ',i
filenames = ['da' + str(i) + '_' + str(x) + '.dk' for x in testg]
grid = gridg
case = 0 # Use 0 for log(g) interp. and 1 for Teff interp. Just a binary switch.
models(filenames,grid,case,midt,midg)
print 'Made it back!'
print 'Done with all Teffs.'
#Now we want to create our finer grid of Teff. We need to read in our interpolated models in logg
#gridg is our set of logg's
#Set up new grid for new Teffs
numbert = range(501)
gridt = np.empty(len(numbert))
for n in numbert:
gridt[n] = midt-2500.+10.*n #########midt-250.+10*n
#Begin iterating over different logg's to get finer Teffs
for i in gridg:
print ''
print 'Now starting with log(g) of ',i
intlogg = str(i * 1000.)
intlogg = intlogg[:-2]
#filenames = ['da' + str(testt[0]) + '_' + intlogg + '.jf','da' + str(testt[1]) + '_' + intlogg + '.jf','da' + str(testt[2]) + '_' + intlogg + '.jf','da' + str(testt[3]) + '_' + intlogg + '.jf','da' + str(testt[4]) + '_' + intlogg + '.jf']
filenames = ['da' + str(x) + '_' + intlogg + '.jf' for x in testt]
grid = gridt
case = 1 # Use 0 for log(g) interp. and 1 for Teff interp. Just a binary switch.
models(filenames,grid,case,midt,midg)
print 'Made it back!'
print 'Done with all the log(g)s.'
'''
#Save file names to interpolated_names.txt
print 'Saving file names to interpolated_names.txt.'
lowt = midt - 1500
lowg = midg*10 - 250
ranget = 10*np.arange(301)#steps of 5 in Teff
rangeg = 50*np.arange(11)#steps of 0.005 in log(g)
f = open('interpolated_names.txt','a')
for y in ranget:
teffwrite = lowt + y
for x in rangeg:
loggwrite = lowg + x
file = 'da' + str(teffwrite) + '_' + str(loggwrite) + '.jf'
f.write(file + '\n')
f.close()
'''
print 'File saved.'
print 'The finer grid is complete!'
| mit |
jseabold/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
jaeilepp/mne-python | mne/tests/test_cov.py | 1 | 24821 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_true
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
from nose.tools import assert_raises
import numpy as np
from scipy import linalg
import warnings
import itertools as itt
from mne.cov import (regularize, whiten_evoked, _estimate_rank_meeg_cov,
_auto_low_rank_model, _apply_scaling_cov,
_undo_scaling_cov, prepare_noise_cov, compute_whitener,
_apply_scaling_array, _undo_scaling_array)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, pick_info, make_ad_hoc_cov)
from mne.io import read_raw_fif, RawArray, read_info
from mne.tests.common import assert_naming, assert_snr
from mne.utils import (_TempDir, slow_test, requires_sklearn_0_15,
run_tests_if_main)
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import channel_type, _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
with warnings.catch_warnings(record=True) as w:
compute_covariance([epochs, epochs_2])
assert_equal(len(w), 0)
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
assert_raises(ValueError, compute_covariance, [epochs, epochs_2])
assert_equal(len(w), 0)
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
assert_equal(len(w), 0)
compute_covariance([epochs, epochs_2], on_mismatch='warn')
assert_raises(ValueError, compute_covariance, epochs,
on_mismatch='x')
assert_true(any('transform mismatch' in str(ww.message) for ww in w))
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
info = read_info(raw_fname)
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
cov_reg = regularize(cov, info)
cov_reg_reorder = regularize(cov_reorder, info)
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names = compute_whitener(cov, info)
whitener_2, w_ch_names_2 = compute_whitener(cov_reorder, info)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener)
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov():
"""Test ad hoc cov creation and I/O."""
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert_true('Covariance' in repr(cov))
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
def test_io_cov():
"""Test IO for noise covariance matrices."""
tempdir = _TempDir()
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(op.join(tempdir, 'test-cov.fif'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert_true('Covariance' in repr(cov))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
cov_sel.save(op.join(tempdir, 'test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
write_cov(cov_badname, cov)
read_cov(cov_badname)
assert_naming(w, 'test_cov.py', 2)
def test_cov_estimation_on_raw():
"""Test estimation from raw (typically empty room)."""
tempdir = _TempDir()
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
# The pure-string uses the more efficient numpy-based method, while
# the list gets triaged to compute_covariance (should be equivalent
# but use more memory)
for method in (None, ['empirical']): # None is cast to 'empirical'
cov = compute_raw_covariance(raw, tstep=None, method=method)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e4)
cov = compute_raw_covariance(raw, method=method) # tstep=0.2 (default)
assert_equal(cov.nfree, cov_mne.nfree - 119) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 1e2)
# test IO when computation done in Python
cov.save(op.join(tempdir, 'test-cov.fif')) # test saving
cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
assert_true(cov_read.ch_names == cov.ch_names)
assert_true(cov_read.nfree == cov.nfree)
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method)
assert_true(cov_mne.ch_names[:5] == cov.ch_names)
assert_snr(cov.data, cov_mne.data[:5, :5], 1e4)
cov = compute_raw_covariance(raw_pick, method=method)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
cov = compute_raw_covariance(raw_2, method=method)
assert_true(any('Too few samples' in str(ww.message) for ww in w))
# no epochs found due to rejection
assert_raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
cov = compute_raw_covariance(raw.copy().crop(0, 10.),
tstep=None, method=method,
reject=dict(eog=1000e-6))
@slow_test
@requires_sklearn_0_15
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with warnings.catch_warnings(record=True): # too few samples
warnings.simplefilter('always')
# XXX don't use "shrunk" here, for some reason it makes Travis 2.7
# hang... "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert_true(err < tol, msg='%s >= %s' % (err, tol))
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@slow_test
def test_cov_estimation_with_triggers():
"""Test estimation from raw with triggers."""
tempdir = _TempDir()
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
_assert_cov(cov, read_cov(cov_km_fname))
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert_true(np.all(cov.data != cov_tmin_tmax.data))
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert_true(err < 0.05, msg=err)
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert_true(cov.ch_names == cov2.ch_names)
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
assert_raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
assert_raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='factor_analysis')
# test IO when computation done in Python
cov.save(op.join(tempdir, 'test-cov.fif')) # test saving
cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
assert_raises(ValueError, compute_covariance, epochs)
assert_raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with warnings.catch_warnings(record=True) as w: # too few samples warning
warnings.simplefilter('always')
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
cov = compute_covariance(epochs, projs=[])
assert_equal(len(w), 2)
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with warnings.catch_warnings(record=True): # samples
compute_covariance(epochs)
# projs checking
compute_covariance(epochs, projs=[])
assert_raises(TypeError, compute_covariance, epochs, projs='foo')
assert_raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert_true(cov.ch_names == cov_sum.ch_names)
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert_true(cov_sum.ch_names == cov.ch_names)
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads')
assert_true(noise_cov['dim'] == reg_noise_cov['dim'])
assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape)
assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08)
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert_true(np.all(mean_baseline < 1.))
assert_true(np.all(mean_baseline > 0.2))
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
assert_raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
@slow_test
def test_rank():
"""Test cov rank estimation."""
# Test that our rank estimation works properly on a simple case
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=False)
cov = read_cov(cov_fname)
ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
ch.startswith('EEG')]
cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
assert_equal(cov['eig'][0], 0.) # avg projector should set this to zero
assert_true((cov['eig'][1:] > 0).all()) # all else should be > 0
# Now do some more comprehensive tests
raw_sample = read_raw_fif(raw_fname)
raw_sss = read_raw_fif(hp_fif_fname)
raw_sss.add_proj(compute_proj_raw(raw_sss))
cov_sample = compute_raw_covariance(raw_sample)
cov_sample_proj = compute_raw_covariance(
raw_sample.copy().apply_proj())
cov_sss = compute_raw_covariance(raw_sss)
cov_sss_proj = compute_raw_covariance(
raw_sss.copy().apply_proj())
picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
info_sample = pick_info(raw_sample.info, picks_all_sample)
picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
eeg=True))]
picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
picks_stack_sample += [('all',
pick_types(info_sample, meg=True, eeg=True))]
info_sss = pick_info(raw_sss.info, picks_all_sss)
picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
picks_stack_somato += [('all',
pick_types(info_sss, meg=True, eeg=True))]
iter_tests = list(itt.product(
[(cov_sample, picks_stack_sample, info_sample),
(cov_sample_proj, picks_stack_sample, info_sample),
(cov_sss, picks_stack_somato, info_sss),
(cov_sss_proj, picks_stack_somato, info_sss)], # sss
[dict(mag=1e15, grad=1e13, eeg=1e6)]
))
for (cov, picks_list, this_info), scalings in iter_tests:
for ch_type, picks in picks_list:
this_very_info = pick_info(this_info, picks)
# compute subset of projs
this_projs = [c['active'] and
len(set(c['data']['col_names'])
.intersection(set(this_very_info['ch_names']))) >
0 for c in cov['projs']]
n_projs = sum(this_projs)
# count channel types
ch_types = [channel_type(this_very_info, idx)
for idx in range(len(picks))]
n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
['eeg', 'mag', 'grad']]
n_meg = n_mag + n_grad
if ch_type in ('all', 'eeg'):
n_projs_eeg = 1
else:
n_projs_eeg = 0
# check sss
if len(this_very_info['proc_history']) > 0:
mf = this_very_info['proc_history'][0]['max_info']
n_free = _get_sss_rank(mf)
if 'mag' not in ch_types and 'grad' not in ch_types:
n_free = 0
# - n_projs XXX clarify
expected_rank = n_free + n_eeg
if n_projs > 0 and ch_type in ('all', 'eeg'):
expected_rank -= n_projs_eeg
else:
expected_rank = n_meg + n_eeg - n_projs
C = cov['data'][np.ix_(picks, picks)]
est_rank = _estimate_rank_meeg_cov(C, this_very_info,
scalings=scalings)
assert_equal(expected_rank, est_rank)
def test_cov_scaling():
"""Test rescaling covs"""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)['data']
cov2 = read_cov(cov_fname)['data']
assert_array_equal(cov, cov2)
evoked.pick_channels([evoked.ch_names[k] for k in pick_types(
evoked.info, meg=True, eeg=True
)])
picks_list = _picks_by_type(evoked.info)
scalings = dict(mag=1e15, grad=1e13, eeg=1e6)
_apply_scaling_cov(cov2, picks_list, scalings=scalings)
_apply_scaling_cov(cov, picks_list, scalings=scalings)
assert_array_equal(cov, cov2)
assert_true(cov.max() > 1)
_undo_scaling_cov(cov2, picks_list, scalings=scalings)
_undo_scaling_cov(cov, picks_list, scalings=scalings)
assert_array_equal(cov, cov2)
assert_true(cov.max() < 1)
data = evoked.data.copy()
_apply_scaling_array(data, picks_list, scalings=scalings)
_undo_scaling_array(data, picks_list, scalings=scalings)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_sklearn_0_15
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
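        """Build a rank-``rank`` signal embedded in ``n_features`` dimensions
        plus heteroscedastic feature-wise noise (helper for this test)."""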
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
assert_equal(len(w), 1)
assert_equal(msg % (n_features + 5, n_features), '%s' % w[0].message)
method_params = {'iter_n_components': [n_features + 5]}
assert_raises(ValueError, _auto_low_rank_model, X, mode='foo',
n_jobs=n_jobs, method_params=method_params, cv=cv)
@slow_test
@requires_sklearn_0_15
def test_compute_covariance_auto_reg():
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:10]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True)
logliks = [c['loglik'] for c in covs]
assert_true(np.diff(logliks).max() <= 0) # descending order
methods = ['empirical',
'factor_analysis',
'ledoit_wolf',
'pca']
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True)
assert_equal(set([c['method'] for c in cov3]),
set(methods))
# invalid prespecified method
assert_raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
run_tests_if_main()
| bsd-3-clause |
RPGOne/scikit-learn | sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
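    # the additive chi2 kernel is k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i)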
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array has shape n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
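    # per feature this equals 2 * sqrt(x + c) * sqrt(y + c) / (x + y + 2 * c),
    # i.e. the skewed chi-squared kernel; log-space avoids over/underflow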
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(Y)))
def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters besides gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
zangsir/sms-tools | lectures/06-Harmonic-model/plots-code/monophonic-polyphonic.py | 3 | 2250 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
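# Plots STFT magnitude spectrograms with overlaid sinusoidal-model tracks for
# two excerpts (carnatic.wav and vignesh.wav), contrasting polyphonic and
# monophonic material; the figure is saved as monophonic-polyphonic.png.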
plt.figure(1, figsize=(9, 6))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/carnatic.wav'))
x1 = x[int(4.35 * fs):]  # cast to int: float indices are invalid in modern numpy
w = np.blackman(1301)
N = 2048
H = 250
t = -70
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (carnatic.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('monophonic-polyphonic.png')
plt.show() | agpl-3.0 |
mraspaud/dask | dask/array/percentile.py | 2 | 6272 | from __future__ import absolute_import, division, print_function
from functools import wraps
from collections.abc import Iterator
import numpy as np
from toolz import merge, merge_sorted
from .core import Array
from ..base import tokenize
from .. import sharedict
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
if not len(a):
return None
if isinstance(q, Iterator):
q = list(q)
if str(a.dtype) == 'category':
result = np.percentile(a.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.categories, a.ordered)
if np.issubdtype(a.dtype, np.datetime64):
a2 = a.astype('i8')
result = np.percentile(a2, q, interpolation=interpolation)
return result.astype(a.dtype)
if not np.issubdtype(a.dtype, np.number):
interpolation = 'nearest'
return np.percentile(a, q, interpolation=interpolation)
def percentile(a, q, interpolation='linear'):
""" Approximate percentile of 1-D array
See numpy.percentile for more information
"""
if not a.ndim == 1:
raise NotImplementedError(
"Percentiles only implemented for 1-d arrays")
q = np.array(q)
token = tokenize(a, list(q), interpolation)
name = 'percentile_chunk-' + token
dsk = dict(((name, i), (_percentile, (key), q, interpolation))
for i, key in enumerate(a._keys()))
name2 = 'percentile-' + token
dsk2 = {(name2, 0): (merge_percentiles, q, [q] * len(a.chunks[0]),
sorted(dsk), a.chunks[0], interpolation)}
dtype = a.dtype
if np.issubdtype(dtype, np.integer):
dtype = (np.array([], dtype=dtype) / 0.5).dtype
dsk = merge(dsk, dsk2)
dsk = sharedict.merge(a.dask, (name2, dsk))
return Array(dsk, name2, chunks=((len(q),),), dtype=dtype)
def merge_percentiles(finalq, qs, vals, Ns, interpolation='lower'):
""" Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of numpy.arrays
Percentiles calculated on different sets of data.
vals : sequence of numpy.arrays
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see numpy.percentile.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = np.array(finalq)
qs = list(map(list, qs))
vals = list(vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
raise ValueError("No non-trivial arrays found")
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
    # Here we silently change the meaning: categorical data is merged via its codes
if str(vals[0].dtype) == 'category':
result = merge_percentiles(finalq, qs, [v.codes for v in vals], Ns, interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = 'nearest'
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError('qs, vals, and Ns parameters must be the same length')
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty(len(q))
count[1:] = np.diff(q)
count[0] = q[0]
count *= N
counts.append(count)
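    # e.g. with q = [20, 40, 60, 80] and N = 100 (as in the docstring example),
    # count becomes [2000, 2000, 2000, 2000]: the percentile gap to the
    # previous entry scaled by N, matching the ``finalq * sum(Ns)`` scale below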
# Sort by calculated percentile values, then number of observations.
# >95% of the time in this function is spent in `merge_sorted` below.
# An alternative that uses numpy sort is shown. It is sometimes
# comparable to, but typically slower than, `merge_sorted`.
#
# >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))
# >>> A.sort(0, kind='mergesort')
combined_vals_counts = merge_sorted(*map(zip, vals, counts))
combined_vals, combined_counts = zip(*combined_vals_counts)
combined_vals = np.array(combined_vals)
combined_counts = np.array(combined_counts)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == 'linear':
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side='left')
right = np.searchsorted(combined_q, desired_q, side='right') - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == 'lower':
rv = combined_vals[lower]
elif interpolation == 'higher':
rv = combined_vals[upper]
elif interpolation == 'midpoint':
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == 'nearest':
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError("interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'")
return rv
| bsd-3-clause |
Vimos/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 42 | 11137 | from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.linalg import eigh
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.testing import assert_true, assert_equal, assert_raises
from sklearn.utils.testing import SkipTest
# non-centered, sparse centers to check the embedding on non-trivial data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
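    # eigenvectors (and hence embedding columns) are only defined up to sign,
    # so either orientation of each column is accepted within ``tol``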
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_sparse_graph_connected_component():
rng = np.random.RandomState(42)
n_samples = 300
boundaries = [0, 42, 121, 200, n_samples]
p = rng.permutation(n_samples)
connections = []
for start, stop in zip(boundaries[:-1], boundaries[1:]):
group = p[start:stop]
# Connect all elements within the group at least once via an
# arbitrary path that spans the group.
for i in range(len(group) - 1):
connections.append((group[i], group[i + 1]))
# Add some more random connections within the group
min_idx, max_idx = 0, len(group) - 1
n_random_connections = 1000
source = rng.randint(min_idx, max_idx, size=n_random_connections)
target = rng.randint(min_idx, max_idx, size=n_random_connections)
connections.extend(zip(group[source], group[target]))
# Build a symmetric affinity matrix
row_idx, column_idx = tuple(np.array(connections).T)
data = rng.uniform(.1, 42, size=len(connections))
affinity = coo_matrix((data, (row_idx, column_idx)))
affinity = 0.5 * (affinity + affinity.T)
for start, stop in zip(boundaries[:-1], boundaries[1:]):
component_1 = _graph_connected_component(affinity, p[start])
component_size = stop - start
assert_equal(component_1.sum(), component_size)
        # We should retrieve the same component mask when starting from either
        # end of the group
component_2 = _graph_connected_component(affinity, p[stop - 1])
assert_equal(component_2.sum(), component_size)
assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert_true(component[:n_sample].all())
assert_true(not component[n_sample:].any())
component = _graph_connected_component(affinity, -1)
assert_true(not component[:n_sample].any())
assert_true(component[n_sample:].all())
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first component using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # Test that SpectralEmbedding fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # Test that SpectralEmbedding fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding is also processing unnormalized laplacian
# correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False)
# Verify using manual computation with dense eigh
laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components] * dd
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
pysb/pysb | pysb/simulator/base.py | 5 | 57380 | from abc import ABCMeta, abstractmethod
import numpy as np
import itertools
import sympy
import collections
from collections.abc import Mapping, Sequence
import numbers
from pysb.core import MonomerPattern, ComplexPattern, as_complex_pattern, \
Parameter, Expression, Model, ComponentSet
from pysb.logging import get_logger, EXTENDED_DEBUG
import pickle
from pysb.export.json import JsonExporter
from pysb.importers.json import model_from_json
from pysb import __version__ as PYSB_VERSION
from datetime import datetime
import dateutil.parser
import copy
from warnings import warn
from pysb.pattern import SpeciesPatternMatcher
from contextlib import contextmanager
import weakref
try:
import pandas as pd
except ImportError:
pd = None
try:
import h5py
except ImportError:
h5py = None
class SimulatorException(Exception):
pass
class InconsistentParameterError(SimulatorException, ValueError):
def __init__(self, parameter_name, value, reason):
super(InconsistentParameterError, self).__init__(
f'Value {value} that was passed for parameter {parameter_name} '
            f"was inconsistent with that parameter's assumption: {reason}"
)
class Simulator(object):
"""An abstract base class for numerical simulation of models.
.. warning::
The interface for this class is considered experimental and may
change without warning as PySB is updated.
Parameters
----------
model : pysb.Model
Model to simulate.
tspan : vector-like, optional
Time values over which to simulate. The first and last values define
the time range. Returned trajectories are sampled at every value unless
        the simulation is interrupted for some reason, e.g., due to
        satisfaction of a logical stopping criterion (see 'tout' below).
initials : vector-like or dict, optional
Values to use for the initial condition of all species. Ordering is
determined by the order of model.species. If not specified, initial
conditions will be taken from model.initials (with initial condition
parameter values taken from `param_values` if specified).
param_values : vector-like or dict, optional
Values to use for every parameter in the model. Ordering is
determined by the order of model.parameters.
If passed as a dictionary, keys must be parameter names.
If not specified, parameter values will be taken directly from
model.parameters.
verbose : bool or int, optional (default: False)
Sets the verbosity level of the logger. See the logging levels and
constants from Python's logging module for interpretation of integer
values. False is equal to the PySB default level (currently WARNING),
True is equal to DEBUG.
Attributes
----------
verbose: bool
Verbosity flag passed to the constructor.
model : pysb.Model
Model passed to the constructor.
tspan : vector-like
Time values passed to the constructor.
Notes
-----
If ``tspan`` is not defined, it may be defined in the call to the
``run`` method.
The dimensionality of ``tout`` depends on whether a single simulation
or multiple simulations are run.
The dimensionalities of ``y``, ``yobs``, ``yobs_view``, ``yexpr``, and
``yexpr_view`` depend on the number of simulations being run as well
as on the type of simulation, i.e., spatial vs. non-spatial.
"""
__metaclass__ = ABCMeta
_supports = { 'multi_initials' : False,
'multi_param_values' : False }
@abstractmethod
def __init__(self, model, tspan=None, initials=None,
param_values=None, verbose=False, **kwargs):
        # Get or create a base PySB logger for this module and model
self._logger = get_logger(self.__module__, model=model,
log_level=verbose)
self._logger.debug('Simulator created')
self._model = model
self.verbose = verbose
self.tout = None
# Per-run initial conditions/parameter/tspan override
self._tspan = tspan
# Per-run tspan, initials and param_values
self._run_tspan = None
self._run_initials = None
self._run_params = None
# Base initials and param values
self._params = None
self.param_values = param_values
self._initials = None
self.initials = initials
# Store init kwargs and run kwargs if needed for saving results
self._init_kwargs = kwargs
self._run_kwargs = None
@property
def model(self):
return self._model
@property
def tspan(self):
return self._run_tspan if self._run_tspan is not None else self._tspan
@tspan.setter
def tspan(self, new_tspan):
self._tspan = new_tspan
@staticmethod
def _num_sims_calc(initials_or_params):
""" Calculate number of simulations implied by initials or param
values """
if initials_or_params is None:
return None
if isinstance(initials_or_params, np.ndarray):
return len(initials_or_params)
first_entry = next(iter(initials_or_params.values()))
try:
return len(first_entry) # First entry is iterable
except TypeError:
return 1 # First entry is non-iterable, e.g. int, float
@property
def initials_length(self):
try:
return len(self.initials)
except SimulatorException:
# Network free simulators
if self._initials:
return len(list(self._initials.values())[0])
elif self._run_initials:
return len(list(self._run_initials.values())[0])
else:
return len(self.param_values)
def _update_initials_dict(self, initials_dict, initials_source, subs=None):
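        """Merge initial-condition overrides from ``initials_source`` (a
        mapping of patterns to values, or an array-like over species) into
        ``initials_dict``, skipping patterns that are already present."""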
if isinstance(initials_source, Mapping):
# Can't just use .update() as we need to test
# equality with .is_equivalent_to()
for cp, value_obj in initials_source.items():
cp = as_complex_pattern(cp)
if any(existing_cp.is_equivalent_to(cp)
for existing_cp in initials_dict):
continue
if isinstance(value_obj, (Sequence, np.ndarray))\
and all(isinstance(v, numbers.Number) for v in value_obj):
value = value_obj
elif isinstance(value_obj, Expression):
value = [value_obj.expand_expr().xreplace(subs[sim]) for sim in range(len(subs))]
elif isinstance(value_obj, Parameter):
# Set parameter using param_values
pi = self._model.parameters.index(value_obj)
value = [self.param_values[sim][pi] for sim in range(len(self.param_values))]
else:
raise TypeError("Unexpected initial condition "
"value type: %s" % type(value_obj))
initials_dict[cp] = value
elif initials_source is not None:
# Update from array-like structure, which we can only do if we
# have the species available (e.g. not in network-free simulations)
if not self.model.species:
raise ValueError(
'Cannot update initials from an array-like source without '
'model species.')
for cp_idx, cp in enumerate(self.model.species):
if any(existing_cp.is_equivalent_to(cp) for existing_cp in
initials_dict):
continue
initials_dict[cp] = [initials_source[n][cp_idx]
for n in range(len(initials_source))]
return initials_dict
@property
def initials_dict(self):
n_sims = self._check_run_initials_vs_base_initials_length()
if n_sims == 1:
n_sims = len(self.param_values)
# Apply any per-run initial overrides
initials_dict = self._update_initials_dict({}, self._run_initials)
# Apply any base initial overrides
initials_dict = self._update_initials_dict(initials_dict,
self._initials)
model_initials = {ic.pattern: ic.value
for ic in self.model.initials}
# Otherwise, populate initials from the model
n_sims_params = len(self.param_values)
n_sims_actual = max(n_sims_params, n_sims)
# Get remaining initials from the model itself and
# self.param_values, if necessary
subs = None
if any(isinstance(v, Expression) for v in model_initials.values()):
# Only need parameter substitutions if model initials include
# expressions
subs = [
dict((p, pv[i]) for i, p in
enumerate(self._model.parameters))
for pv in self.param_values]
if len(subs) == 1 and n_sims_actual > 1:
subs = list(itertools.repeat(subs[0], n_sims_actual))
initials_dict = self._update_initials_dict(
initials_dict, model_initials, subs=subs
)
return initials_dict
def _check_run_initials_vs_base_initials_length(self):
# Otherwise, build the list from the model, and any overrides
# specified in self._initials and self._run_initials
n_sims_initials = self._num_sims_calc(self._initials)
n_sims_run = self._num_sims_calc(self._run_initials)
if n_sims_initials is not None and n_sims_run is not None \
and n_sims_run != n_sims_initials:
raise ValueError(
"The base initials set with self.initials imply {} "
"simulations, but the run() initials imply {} simulations."
" Either set self.initials=None, or change the number of "
"simulations in the run() initials".format(
n_sims_initials, n_sims_run))
if n_sims_initials is not None:
return n_sims_initials
elif n_sims_run is not None:
return n_sims_run
else:
return 1
@property
def initials(self):
if not self.model.species:
raise SimulatorException('No model species list - either '
'generate the model equations or use '
'initials_dict() for network-free '
'simulations')
# Check potential quick return options
if self._run_initials is not None:
if not isinstance(self._run_initials, Mapping) and \
self._initials is None:
return self._run_initials
elif not isinstance(self._initials, Mapping) and \
self._initials is not None:
return self._initials
# At this point (after dimensionality check), we can return
# self._run_initials if it's not a dictionary and not None
if self._run_initials is not None and not isinstance(
self._run_initials, Mapping):
return self._run_initials
n_sims_initials = self._check_run_initials_vs_base_initials_length()
n_sims_params = len(self.param_values)
n_sims_actual = max(n_sims_params, n_sims_initials)
y0 = np.full((n_sims_actual, len(self.model.species)), 0.0)
for species, vals in self.initials_dict.items():
species_index = self._model.get_species_index(species)
y0[:, species_index] = vals
return y0
@initials.setter
def initials(self, new_initials):
self._initials = self._process_incoming_initials(new_initials)
def _process_incoming_initials(self, new_initials):
if new_initials is None:
return None
# If new_initials is a pandas dataframe, convert to a dict
if pd and isinstance(new_initials, pd.DataFrame):
new_initials = new_initials.to_dict(orient='list')
# If new_initials is a list, convert to numpy array
if isinstance(new_initials, list):
new_initials = np.array(new_initials, copy=False)
# Check if new_initials is a dict, and if so validate the keys
# (ComplexPatterns)
if isinstance(new_initials, dict):
n_sims = 1
if len(new_initials) > 0:
n_sims = self._num_sims_calc(new_initials)
for cplx_pat, val in new_initials.items():
if not isinstance(cplx_pat, (MonomerPattern,
ComplexPattern)):
raise ValueError('Dictionary key %s is not a '
'MonomerPattern or ComplexPattern' %
repr(cplx_pat))
# if val is a number, convert it to a single-element array
if not isinstance(val, (Sequence, np.ndarray)):
val = [val]
new_initials[cplx_pat] = np.array(val)
# otherwise, check whether simulator supports multiple
# initial values :
if len(val) != n_sims:
raise ValueError("all arrays in new_initials dictionary "
"must be equal length")
if not np.isfinite(val).all():
raise ValueError('Please check initial {} for non-finite '
'values'.format(cplx_pat))
elif isinstance(new_initials, np.ndarray):
# if new_initials is a 1D array, convert to a 2D array of length 1
if len(new_initials.shape) == 1:
new_initials = np.resize(new_initials, (1, len(new_initials)))
n_sims = new_initials.shape[0]
# make sure number of initials values equals len(model.species)
if new_initials.shape[1] != len(self._model.species):
raise ValueError("new_initials must be the same length as "
"model.species")
if not np.isfinite(new_initials).all():
raise ValueError('Please check initials array '
'for non-finite values')
else:
raise ValueError(
'Implicit conversion of data type "{}" is not '
'supported. Please supply initials as a numpy array, list, '
'or a pandas DataFrame.'.format(type(new_initials)))
if n_sims > 1:
if not self._supports['multi_initials']:
raise ValueError(
self.__class__.__name__ +
" does not support multiple initial values at this time.")
if 1 < len(self.param_values) != n_sims:
raise ValueError(
'Cannot set initials for {} simulations '
'when param_values has been set for {} '
'simulations'.format(
n_sims, len(self.param_values)))
return new_initials
@property
def param_values(self):
if not self.model._derived_parameters:
if self._params is not None and \
not isinstance(self._params, dict) and \
self._run_params is None:
return self._params
elif self._run_params is not None and \
not isinstance(self._run_params, dict) and \
self._params is None:
return self._run_params
# create parameter vector from the values in the model
param_values_dict = {}
n_sims = self._num_sims_calc(self._params)
if isinstance(self._params, dict):
param_values_dict.update(self._params)
elif isinstance(self._params, np.ndarray):
param_values_dict = dict(zip(
[p.name for p in self._model.parameters], self._params.T))
n_sims_run = self._num_sims_calc(self._run_params)
if n_sims is None:
n_sims = n_sims_run
elif n_sims_run is not None and n_sims_run != n_sims:
raise ValueError(
"The base parameters set with self.param_values imply "
"{} simulations, but the run() params imply {} "
"simulations. Either set self.param_values=None, or "
"change the number of simulations in the run() params"
.format(n_sims, n_sims_run))
# At this point (after dimensionality check) we can return the
# _run_params, if it's not a dict
if self._run_params is not None:
if not isinstance(self._run_params, dict):
if not self._model._derived_parameters:
return self._run_params
else:
param_values_dict.update(dict(zip(
self.model.parameters.keys(), self._run_params
)))
else:
param_values_dict.update(self._run_params)
if n_sims is None:
n_sims = 1
# Get the base parameters from the model
param_values = np.array(
[p.value for p in self._model.parameters] +
[p.value for p in self._model._derived_parameters]
)
param_values = np.repeat([param_values], n_sims, axis=0)
# Process overrides
for key in param_values_dict.keys():
try:
pi = self._model.parameters.index(
self._model.parameters[key])
except KeyError:
raise IndexError("new_params dictionary has unknown "
"parameter name (%s)" % key)
# loop over n_sims
for n in range(n_sims):
param_values[n][pi] = param_values_dict[key][n]
return param_values
@param_values.setter
def param_values(self, new_params):
self._params = self._process_incoming_params(new_params)
def _process_incoming_params(self, new_params):
if new_params is None:
return None
# Convert pandas dataframe to dictionary
if pd and isinstance(new_params, pd.DataFrame):
new_params = new_params.to_dict(orient='list')
# If new_params is a list, convert to numpy array
if isinstance(new_params, list):
new_params = np.array(new_params)
if isinstance(new_params, dict):
n_sims = 1
if len(new_params) > 0:
n_sims = self._num_sims_calc(new_params)
for key, val in new_params.items():
if key not in self._model.parameters.keys():
raise IndexError("new_params dictionary has unknown "
"parameter name (%s)" % key)
# if val is a number, convert it to a single-element array
if not isinstance(val, Sequence):
val = [val]
new_params[key] = np.array(val)
# Check all elements are the same length
if len(val) != n_sims:
raise ValueError("all arrays in params dictionary "
"must be equal length")
for value in val:
try:
self._model.parameters[key].check_value(value)
except ValueError as e:
raise InconsistentParameterError(
key, value, str(e)
)
elif isinstance(new_params, np.ndarray):
# if new_params is a 1D array, convert to a 2D array of length 1
if len(new_params.shape) == 1:
new_params = np.resize(new_params, (1, len(new_params)))
n_sims = new_params.shape[0]
# make sure number of param values equals len(model.parameters)
if new_params.shape[1] != len(self._model.parameters):
raise ValueError("new_params must be the same length as "
"model.parameters")
for isim in range(n_sims):
for param, value in zip(self._model.parameters,
new_params[isim, :]):
try:
param.check_value(value)
except ValueError as e:
raise InconsistentParameterError(
param.name, value, str(e)
)
else:
raise ValueError(
'Implicit conversion of data type "{}" is not '
'supported. Please supply parameters as a numpy array, list, '
'or a pandas DataFrame.'.format(type(new_params)))
# Check whether simulator supports multiple param_values
if n_sims > 1 and not self._supports['multi_param_values']:
raise ValueError(
self.__class__.__name__ +
" does not support multiple parameter values at this time.")
return new_params
def _reset_run_overrides(self):
"""
Reset any single-run tspan, initials, param_values
When calling run(), the user can specify tspan, initials and
param_values, which are only used for a single run. This method
resets those overrides after the run is complete (called from
:func:`SimulationResult.__init__`).
"""
self._run_tspan = None
self._run_initials = None
self._run_params = None
@abstractmethod
def run(self, tspan=None, initials=None, param_values=None,
_run_kwargs=None):
"""Run a simulation.
Notes for developers implementing Simulator subclasses:
Implementations should return a :class:`.SimulationResult` object.
        Subclasses should pass any additional run arguments as a dictionary
to the `_run_kwargs` argument when calling the superclass's run
method. If the run method has variable keyword arguments, this can
be achieved by passing `_run_kwargs=locals()` to the superclass's
run method. The run kwargs are used for reference when saving and
loading SimulationResults to disk. They aren't compulsory, but not
including them will generate a warning. To suppress (e.g. if there
are no additional arguments), set `_run_kwargs=[]`.
"""
self._logger.info('Simulation(s) started')
if _run_kwargs:
# Don't store these arguments twice
_run_kwargs.pop('self')
_run_kwargs.pop('initials', None)
_run_kwargs.pop('param_values', None)
_run_kwargs.pop('tspan', None)
self._run_kwargs = _run_kwargs
elif _run_kwargs is None:
self._logger.warning(
'{} has not passed any additional run arguments to '
'_run_kwargs. Instructions are included in the Simulation '
'base class run method docstring.'.format(
self.__class__.__name__))
self._run_tspan = tspan
if self.tspan is None:
raise ValueError("tspan must be defined before "
"simulation can run")
self._run_params = self._process_incoming_params(param_values)
self._run_initials = self._process_incoming_initials(initials)
# If only one set of param_values, run all simulations
# with the same parameters
if len(self.param_values) == 1 and self.initials_length > 1:
new_params = np.repeat(self.param_values,
self.initials_length,
axis=0)
self._run_params = new_params
# Error checks on 'param_values' and 'initials'
if len(self.param_values) != self.initials_length:
raise ValueError(
"'param_values' and 'initials' must be equal lengths.\n"
"len(param_values): %d\n"
"len(initials): %d" %
(len(self.param_values), self.initials_length))
elif len(self.param_values.shape) != 2 or \
self.param_values.shape[1] != (
len(self._model.parameters) +
len(self._model._derived_parameters)):
raise ValueError(
"'param_values' must be a 2D array of dimension N_SIMS x "
"len(model.parameters).\n"
"param_values.shape: " + str(self.param_values.shape) +
"\nlen(model.parameters): %d" %
len(self._model.parameters))
if self.model.species and (len(self.initials.shape) != 2 or
self.initials.shape[1] != len(self._model.species)):
raise ValueError(
"'initials' must be a 2D array of dimension N_SIMS x "
"len(model.species).\n"
"initials.shape: " + str(self.initials.shape) +
"\nlen(model.species): %d" % len(self._model.species))
return None
class SimulationResult(object):
"""
Results of a simulation with properties and methods to access them.
.. warning::
Please note that the interface for this class is considered
experimental and may change without warning as PySB is updated.
Notes
-----
In the attribute descriptions, a "trajectory set" is a 2D numpy array,
species on first axis and time on second axis, with each element
containing the concentration or count of the species at the specified time.
A list of trajectory sets contains a trajectory set for each simulation.
Parameters
----------
simulator : Simulator
The simulator object that generated the trajectories
tout: list-like
Time points returned by the simulator (may be different from ``tspan``
if simulation is interrupted for some reason).
trajectories : list or numpy.ndarray
A set of species trajectories from a simulation. Should either be a
list of 2D numpy arrays or a single 3D numpy array.
squeeze : bool, optional (default: True)
Return trajectories as a 2D array, rather than a 3d array, if only
a single simulation was performed.
simulations_per_param_set : int
Number of trajectories per parameter set. Typically always 1 for
deterministic simulators (e.g. ODE), but with stochastic simulators
multiple trajectories per parameter/initial condition set are often
desired.
model: pysb.Model
initials: numpy.ndarray
param_values: numpy.ndarray
model, initials, param_values are an alternative constructor
mechanism used when loading SimulationResults from files (see
:func:`SimulationResult.load`). Setting just the simulator argument
instead of these arguments is recommended.
Examples
--------
The following examples use a simple model with three observables and one
expression, with a single simulation.
>>> from pysb.examples.expression_observables import model
>>> from pysb.simulator import ScipyOdeSimulator
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> sim = ScipyOdeSimulator(model, tspan=np.linspace(0, 40, 10), \
integrator_options={'atol': 1e-20})
>>> simulation_result = sim.run()
``simulation_result`` is a :class:`SimulationResult` object. An
observable can be accessed like so:
>>> print(simulation_result.observables['Bax_c0']) \
#doctest: +NORMALIZE_WHITESPACE
[1.0000e+00 1.1744e-02 1.3791e-04 1.6196e-06 1.9020e-08
2.2337e-10 2.6232e-12 3.0806e-14 3.6178e-16 4.2492e-18]
It is also possible to retrieve the value of all observables at a
particular time point, e.g. the final concentrations:
>>> print(simulation_result.observables[-1]) \
#doctest: +SKIP
(4.2492e-18, 1.6996e-16, 1.)
Expressions are read in the same way as observables:
>>> print(simulation_result.expressions['NBD_signal']) \
#doctest: +NORMALIZE_WHITESPACE
[0. 4.7847 4.9956 4.9999 5. 5. 5. 5. 5. 5. ]
The species trajectories can be accessed as a numpy ndarray:
>>> print(simulation_result.species) #doctest: +NORMALIZE_WHITESPACE
[[1.0000e+00 0.0000e+00 0.0000e+00]
[1.1744e-02 5.2194e-02 9.3606e-01]
[1.3791e-04 1.2259e-03 9.9864e-01]
[1.6196e-06 2.1595e-05 9.9998e-01]
[1.9020e-08 3.3814e-07 1.0000e+00]
[2.2337e-10 4.9637e-09 1.0000e+00]
[2.6232e-12 6.9951e-11 1.0000e+00]
[3.0806e-14 9.5840e-13 1.0000e+00]
[3.6178e-16 1.2863e-14 1.0000e+00]
[4.2492e-18 1.6996e-16 1.0000e+00]]
Species, observables and expressions can be combined into a single numpy
ndarray and accessed similarly. Here, the initial concentrations of all
these entities are examined:
>>> print(simulation_result.all[0]) #doctest: +SKIP
( 1., 0., 0., 1., 0., 0., 0.)
The ``all`` array can be accessed as a pandas DataFrame object,
    which allows for more convenient access to pandas' advanced
    functionality, such as indexing and slicing. Here, the concentrations of
the observable ``Bax_c0`` and the expression ``NBD_signal`` are read at
time points between 5 and 15 seconds:
>>> df = simulation_result.dataframe
>>> print(df.loc[5:15, ['Bax_c0', 'NBD_signal']]) \
#doctest: +NORMALIZE_WHITESPACE
Bax_c0 NBD_signal
time
8.888889 0.000138 4.995633
13.333333 0.000002 4.999927
"""
CUSTOM_ATTR_PREFIX = 'usrattr_'
def __init__(self, simulator, tout, trajectories=None,
observables_and_expressions=None, squeeze=True,
simulations_per_param_set=1,
model=None, initials=None, param_values=None):
if simulator:
simulator._logger.debug('SimulationResult constructor started')
self._param_values = simulator.param_values.copy()
try:
self._initials = simulator.initials.copy()
except SimulatorException:
# Network free simulations don't have initials list, only dict
self._initials = simulator.initials_dict.copy()
self._model = copy.deepcopy(simulator._model)
self.simulator_class = simulator.__class__
self.init_kwargs = copy.deepcopy(simulator._init_kwargs)
self.run_kwargs = copy.deepcopy(simulator._run_kwargs)
else:
self._param_values = param_values
self._initials = initials
self._model = model
self.simulator_class = None
self.init_kwargs = {}
self.run_kwargs = {}
self.squeeze = squeeze
self.tout = np.asarray(tout)
self._yfull = None
self.n_sims_per_parameter_set = simulations_per_param_set
self.pysb_version = PYSB_VERSION
self.timestamp = datetime.now()
self.custom_attrs = {}
if trajectories is None and observables_and_expressions is None:
raise ValueError('Need to supply at least one of species '
'trajectories or observables_and_expressions')
if trajectories is not None and len(trajectories) > 0:
# Validate incoming trajectories
if getattr(trajectories, 'ndim', None) == 3:
# trajectories is a 3D array, create a list of 2D arrays
# This is just a view and doesn't copy the data
self._y = [tr for tr in trajectories]
else:
# Not a 3D array, check for a list of 2D arrays
try:
if any(tr.ndim != 2 for tr in trajectories):
raise AttributeError
except (AttributeError, TypeError):
raise ValueError("trajectories should be a 3D array or a "
"list of 2D arrays")
self._y = trajectories
self._nsims = len(self._y)
if len(self.tout) != self.nsims:
raise ValueError("Simulator tout should be the same length as "
"trajectories")
for i in range(self.nsims):
if len(self.tout[i]) != self._y[i].shape[0]:
raise ValueError("The number of time points in tout[{0}] "
"should match the trajectories array for "
"simulation {0}".format(i))
if self._y[i].shape[1] != len(self._model.species):
raise ValueError("The number of species in trajectory {0} "
"should match length of "
"model.species".format(i))
else:
self._y = None
# Calculate ``yobs`` and ``yexpr`` based on values of ``y``
exprs = self._model.expressions_dynamic(include_local=False)
expr_names = [expr.name for expr in exprs]
model_obs = self._model.observables
obs_names = list(model_obs.keys())
param_names = list(p.name for p in self._model.parameters)
if not _allow_unicode_recarray():
for name_list, name_type in zip(
(expr_names, obs_names, param_names),
('Expression', 'Observable', 'Parameter')):
for i, name in enumerate(name_list):
try:
name_list[i] = name.encode('ascii')
except UnicodeEncodeError:
error_msg = 'Non-ASCII compatible ' + \
'%s names not allowed' % name_type
raise ValueError(error_msg)
yobs_dtype = (list(zip(obs_names, itertools.repeat(float)))
if obs_names else float)
yexpr_dtype = (list(zip(expr_names, itertools.repeat(float)))
if expr_names else float)
if observables_and_expressions:
# Observables and expression values are used as supplied
self._nsims = len(observables_and_expressions)
self._yobs_view = [observables_and_expressions[n][:, 0:(len(
self._model.observables))] for n in range(self.nsims)]
self._yexpr_view = [observables_and_expressions[n][:, (len(
self._model.observables)):] for n in range(self.nsims)]
self._yobs = [self._yobs_view[n].reshape(
len(tout[n]) * len(obs_names)).view(dtype=yobs_dtype) for n
in range(self.nsims)]
self._yexpr = [self._yexpr_view[n].reshape(
len(tout[n]) * len(expr_names)).view(dtype=yexpr_dtype) for n
in range(self.nsims)]
else:
self._yobs = [np.ndarray((len(self.tout[n]),), dtype=yobs_dtype) if obs_names
else np.ndarray((len(self.tout[n]), 0), dtype=yobs_dtype)
for n in range(self.nsims)]
self._yobs_view = [self._yobs[n].view(float).
reshape(len(self._yobs[n]), -1) for n in range(
self.nsims)]
self._yexpr = [np.ndarray((len(self.tout[n]),), dtype=yexpr_dtype) if expr_names
else np.ndarray((len(self.tout[n]), 0), dtype=yexpr_dtype)
for n in range(self.nsims)]
self._yexpr_view = [self._yexpr[n].view(float).reshape(len(
self._yexpr[n]), -1) for n in range(self.nsims)]
# loop over simulations
sym_names = obs_names + param_names
expanded_exprs = [sympy.lambdify(sym_names, expr.expand_expr(),
"numpy") for expr in exprs]
for n in range(self.nsims):
if simulator:
simulator._logger.log(EXTENDED_DEBUG,
'Evaluating exprs/obs %d/%d'
% (n + 1, self.nsims))
# observables
for i, obs in enumerate(model_obs):
self._yobs_view[n][:, i] = (
self._y[n][:, obs.species] * obs.coefficients).sum(axis=1)
# expressions
sym_dict = dict((k, self._yobs[n][k]) for k in obs_names)
sym_dict.update(dict((p.name, self.param_values[
n // self.n_sims_per_parameter_set][i]) for i, p in
enumerate(self._model.parameters)))
for i, expr in enumerate(exprs):
self._yexpr_view[n][:, i] = expanded_exprs[i](**sym_dict)
if simulator:
simulator._reset_run_overrides()
simulator._logger.debug('SimulationResult constructor finished')
def _squeeze_output(self, trajectories):
"""
Reduces trajectories to a 2D matrix if only one simulation present
Can be disabled by setting self.squeeze to False
"""
if self.nsims == 1 and self.squeeze:
return trajectories[0]
else:
return trajectories
@property
def nsims(self):
""" The number of simulations in this SimulationResult """
return self._nsims
@property
def all(self):
"""
Aggregate species, observables, and expressions trajectories into
a numpy.ndarray with record-style data-type for return to the user.
"""
if self._yfull is None:
if self._y is None:
yfull_dtype = []
else:
sp_names = ['__s%d' % i
for i in range(len(self._model.species))]
yfull_dtype = list(zip(sp_names, itertools.repeat(float)))
if len(self._model.observables):
yfull_dtype += self._yobs[0].dtype.descr
if len(self._model.expressions_dynamic()):
yfull_dtype += self._yexpr[0].dtype.descr
yfull = []
# loop over simulations
for n in range(self.nsims):
yfull.append(np.ndarray(len(self.tout[n]), yfull_dtype))
yfull_view = yfull[n].view(float).reshape((len(yfull[n]), -1))
n_sp = self._y[n].shape[1] if self._y else 0
n_ob = self._yobs_view[n].shape[1]
n_ex = self._yexpr_view[n].shape[1]
if self._y:
yfull_view[:, :n_sp] = self._y[n]
yfull_view[:, n_sp:n_sp + n_ob] = self._yobs_view[n]
yfull_view[:, n_sp + n_ob:n_sp + n_ob + n_ex] = \
self._yexpr_view[n]
self._yfull = yfull
return self._squeeze_output(self._yfull)
@property
def dataframe(self):
"""
A conversion of the trajectory sets (species, observables and
expressions for all simulations) into a single
:py:class:`pandas.DataFrame`.
"""
if pd is None:
raise Exception('Please "pip install pandas" for this feature')
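        # Build one row per (simulation, time point): a single squeezed
        # simulation gets a plain time index, otherwise a
        # (simulation, time) MultiIndex is used.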
sim_ids = (np.repeat(range(self.nsims), [len(t) for t in self.tout]))
times = np.concatenate(self.tout)
if self.nsims == 1 and self.squeeze:
idx = pd.Index(times, name='time')
else:
idx = pd.MultiIndex.from_tuples(list(zip(sim_ids, times)),
names=['simulation', 'time'])
simdata = self.all
if not isinstance(simdata, np.ndarray):
simdata = np.concatenate(simdata)
return pd.DataFrame(simdata, index=idx)
@property
def species(self):
"""
List of trajectory sets. The first dimension contains species.
"""
if self._y is None:
raise ValueError('No trajectories are available for network-free '
'simulations')
return self._squeeze_output(self._y)
@property
def observables(self):
"""
List of trajectory sets. The first dimension contains observables.
"""
if not self._model.observables:
raise ValueError('Model has no observables')
return self._squeeze_output(self._yobs)
def observable(self, pattern):
"""
Calculate a pattern's trajectories without adding to model
This method calculates an observable "on demand" using
any supplied MonomerPattern or ComplexPattern against the simulation
result, without re-running the simulation.
Note that the monomers within the supplied pattern are reconciled
with the SimulationResult's internal copy of the model by name. This
method only works on simulations which calculate species
trajectories (i.e. it will not work on network-free simulations).
Raises a ValueError if the pattern does not match at least one species.
Parameters
----------
pattern: pysb.MonomerPattern or pysb.ComplexPattern
An observable pattern to match
Returns
-------
pandas.Series
Series containing the simulation trajectories for the specified
observable
Examples
--------
>>> from pysb import ANY
>>> from pysb.examples import earm_1_0
>>> from pysb.simulator import ScipyOdeSimulator
>>> simres = ScipyOdeSimulator(earm_1_0.model, tspan=range(5)).run()
>>> m = earm_1_0.model.monomers
Observable of bound Bid:
>>> simres.observable(m.Bid(b=ANY))
time
0 0.000000e+00
1 1.190933e-12
2 2.768582e-11
3 1.609716e-10
4 5.320530e-10
dtype: float64
Observable of AMito bound to mCytoC:
>>> simres.observable(m.AMito(b=1) % m.mCytoC(b=1))
time
0 0.000000e+00
1 1.477319e-77
2 1.669917e-71
3 5.076939e-69
4 1.157400e-66
dtype: float64
"""
# Adjust the supplied pattern's monomer objects to match the
# simulationresult's internal model
if isinstance(pattern, MonomerPattern):
self._update_monomer_pattern(pattern)
elif isinstance(pattern, ComplexPattern):
for mp in pattern.monomer_patterns:
self._update_monomer_pattern(mp)
else:
raise ValueError('The pattern must be a MonomerPattern or '
'ComplexPattern')
if self._y is None:
raise ValueError('On demand observables can only be calculated '
'on simulations with species trajectories')
obs_matches = SpeciesPatternMatcher(self._model).match(
pattern, index=True, counts=True)
if not obs_matches:
raise ValueError('No species match the supplied observable '
'pattern')
return self.dataframe.iloc[:, list(obs_matches.keys())].multiply(
list(obs_matches.values())).sum(axis=1)
def _update_monomer_pattern(self, pattern):
""" Update a pattern's monomer objects to use internal model
Internal function for in-place update of a pattern to replace its
monomers with those from SimulationResult's model, matching by name.
Raises ValueError if no monomer with the specified name is in the
model.
"""
mon_name = pattern.monomer.name
try:
new_mon = self._model.monomers[mon_name]
except KeyError:
raise ValueError('There was no monomer called "{}" in the model '
'"{}" at the time of simulation'.format(
mon_name, self._model.name))
pattern.monomer = new_mon
@property
def expressions(self):
"""
List of trajectory sets. The first dimension contains expressions.
"""
if not self._model.expressions_dynamic():
raise ValueError('Model has no dynamic expressions')
return self._squeeze_output(self._yexpr)
@property
def initials(self):
return self._initials
@property
def param_values(self):
return self._param_values
def save(self, filename, dataset_name=None, group_name=None,
append=False, include_obs_exprs=False):
"""
Save a SimulationResult to a file (HDF5 format)
HDF5 is a hierarchical, binary storage format well suited to storing
matrix-like data. Our implementation requires the h5py package.
Each SimulationResult is treated as an HDF5 dataset, stored within a
group which is specific to a model. In this way, it is possible to save
multiple SimulationResults for a specific model.
A group is first created in the HDF file root (see group_name
argument). Within that group, a dataset "_model" has a JSON
version of the PySB model. SimulationResult are stored as groups
within the model group.
The file hierarchy under group_name/dataset_name/ then consists of
the following HDF5 gzip compressed HDF5 datasets: trajectories,
param_values, initials, tout, observables (optional) and expressions
(optional); and the following attributes:
simulator_class (pickled Class), simulator_kwargs (pickled dict),
squeeze (bool), simulations_per_param_set (int), pysb_version (str),
timestamp (ISO 8601 format).
Custom attributes can be stored in the SimulationResult's
`custom_attrs` dictionary. Keys should be strings, values can be any
picklable object. When saved to HDF5, these custom attributes will
be prefixed with ``usrattr_``.
Parameters
----------
filename: str
Filename to which the data will be saved
dataset_name: str or None
Dataset name. If None, it will default to 'result'. If the
dataset_name already exists within the group, a ValueError is
raised.
group_name: str or None
Group name. If None, will default to the name of the model.
append: bool
If False, raise IOError if the specified file already exists. If
True, append to existing file (or create if it doesn't exist).
include_obs_exprs: bool
Whether to save observables and expressions in the file or not.
If they are not included, they can be recreated from the model
and species trajectories when loaded back into PySB, but you may
wish to include them for use with external software, or if you
have complex expressions which take a long time to compute.
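        Examples
        --------
        A minimal, illustrative call (assumes an existing
        :class:`SimulationResult` named ``simulation_result``; the file and
        attribute names below are hypothetical):
        >>> simulation_result.custom_attrs['note'] = 'test run' #doctest: +SKIP
        >>> simulation_result.save('results.h5', append=True) #doctest: +SKIP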
"""
if h5py is None:
raise Exception('Please install the h5py package for this feature')
if self._y is None and not include_obs_exprs:
warn('This SimulationResult has no trajectories - '
'you will need to set include_obs_exprs=True if '
'you wish to save observables and expressions')
if group_name is None:
group_name = self._model.name
if dataset_name is None:
dataset_name = 'result'
# np.void maps to bytes in HDF5.
enpickle = lambda obj: np.void(pickle.dumps(obj, -1))
model_json = JsonExporter(self._model).export(include_netgen=True)
with h5py.File(filename, 'a' if append else 'w-') as hdf:
# Get or create the group
try:
grp = hdf.create_group(group_name)
grp.create_dataset('_model_json', data=model_json)
if '_model' in grp:
raise ValueError()
except ValueError:
grp = hdf[group_name]
if '_model_json' in grp:
model = model_from_json(grp['_model_json'][()])
else:
with _patch_model_setstate():
model = pickle.loads(grp['_model'][()])
if model.name != self._model.name:
raise ValueError('SimulationResult model has name "{}", '
'but the model in HDF5 file group "{}" '
'has name "{}"'.format(self._model.name,
group_name,
model.name))
# Create the result dataset, which is actually a nested HDF group
dset = grp.create_group(dataset_name)
if self._y is not None:
dset.create_dataset('trajectories', data=self._y,
compression='gzip', shuffle=True)
if include_obs_exprs:
dset.create_dataset('observables', data=self._yobs_view,
compression='gzip', shuffle=True)
dset.create_dataset('expressions', data=self._yexpr_view,
compression='gzip', shuffle=True)
dset.create_dataset('param_values', data=self.param_values,
compression='gzip', shuffle=True)
if isinstance(self.initials, np.ndarray):
dset.create_dataset('initials', data=self.initials,
compression='gzip', shuffle=True)
else:
dset.create_dataset('initials_dict', data=enpickle(
self.initials))
dset.create_dataset('tout', data=self.tout,
compression='gzip')
dset.attrs['simulator_class'] = enpickle(self.simulator_class)
dset.attrs['init_kwargs'] = enpickle(self.init_kwargs)
dset.attrs['run_kwargs'] = enpickle(self.run_kwargs)
dset.attrs['squeeze'] = self.squeeze
dset.attrs['simulations_per_param_set'] = \
self.n_sims_per_parameter_set
dset.attrs['pysb_version'] = self.pysb_version
dset.attrs['timestamp'] = datetime.isoformat(
self.timestamp)
# This is the range of ints that can be natively encoded in HDF5.
int_min = np.iinfo(np.int64).min
int_max = np.iinfo(np.uint64).max
for attr_name, attr_val in self.custom_attrs.items():
# Pass HDF5-native values straight through, pickling others.
if (not (isinstance(attr_val,
(str, bytes, float, complex))
or (isinstance(attr_val, numbers.Integral)
and int_min <= attr_val <= int_max))):
attr_val = enpickle(attr_val)
dset.attrs[self.CUSTOM_ATTR_PREFIX + attr_name] = attr_val
@classmethod
def load(cls, filename, dataset_name=None, group_name=None):
"""
Load a SimulationResult from a file (HDF5 format)
For a description of the file format see :func:`save`
Parameters
----------
filename: str
Filename from which to load data
dataset_name: str or None
Dataset name. Can be left as None when the group specified only
contains one dataset, which will then be selected. If None and
more than one dataset is in the group, a ValueError is raised.
group_name: str or None
Group name. This is typically the name of the model. Can be left as
None when the file only contains one group, which will then be
selected. If None and more than group is in the file a
ValueError is raised.
Returns
-------
SimulationResult
Set of trajectories and associated metadata loaded from the file
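        Examples
        --------
        A minimal, illustrative call (the file name below is hypothetical and
        must refer to a file previously written by :func:`save`):
        >>> simres = SimulationResult.load('results.h5') #doctest: +SKIP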
"""
if h5py is None:
raise Exception('Please "pip install h5py" for this feature')
with h5py.File(filename, 'r') as hdf:
if group_name is None:
groups = hdf.keys()
if len(groups) > 1:
raise ValueError("group_name must be specified when file "
"contains more than one group. Options "
"are: {}".format(str(groups)))
group_name = next(iter(hdf))
grp = hdf[group_name]
if dataset_name is None:
datasets = [k for k in grp.keys() if k not in
('_model', '_model_json')]
if len(datasets) > 1:
raise ValueError("dataset_name must be specified when "
"group contains more than one dataset. "
"Options are: {}".format(str(datasets)))
dataset_name = datasets[0]
dset = grp[dataset_name]
obs_and_exprs = None
if 'observables' in dset.keys():
obs_and_exprs = list(dset['observables'][:])
if 'expressions' in dset.keys():
exprs = dset['expressions'][:]
if obs_and_exprs is None:
obs_and_exprs = list(exprs)
else:
for i in range(len(obs_and_exprs)):
obs_and_exprs[i] = np.concatenate(
[obs_and_exprs[i], exprs[i]],
axis=1
)
trajectories = None
try:
trajectories = dset['trajectories'][:]
except KeyError:
pass
try:
initials = dset['initials'][:]
except KeyError:
initials = pickle.loads(dset['initials_dict'][()])
if '_model_json' in grp:
model = model_from_json(grp['_model_json'][()])
else:
warn('The SimulationResult file uses an old model '
'format (pickled). It\'s recommended you re-save '
'the SimulationResult to use the new format (JSON).')
with _patch_model_setstate():
model = pickle.loads(grp['_model'][()])
simres = cls(
simulator=None,
model=model,
initials=initials,
param_values=dset['param_values'][:],
tout=dset['tout'][:],
trajectories=trajectories,
observables_and_expressions=obs_and_exprs,
squeeze=dset.attrs['squeeze'],
simulations_per_param_set=dset.attrs[
'simulations_per_param_set']
)
simres.pysb_version = dset.attrs['pysb_version']
simres.timestamp = dateutil.parser.parse(
dset.attrs['timestamp'])
simres.simulator_class = pickle.loads(
dset.attrs['simulator_class'])
simres.init_kwargs = pickle.loads(dset.attrs['init_kwargs'])
simres.run_kwargs = pickle.loads(dset.attrs['run_kwargs'])
for attr_name in dset.attrs.keys():
if attr_name.startswith(cls.CUSTOM_ATTR_PREFIX):
orig_name = attr_name[len(cls.CUSTOM_ATTR_PREFIX):]
attr_val = dset.attrs[attr_name]
# Restore objects that were pickled for storage.
if isinstance(attr_val, np.void):
attr_val = pickle.loads(attr_val)
simres.custom_attrs[orig_name] = attr_val
return simres
def _allow_unicode_recarray():
"""Return True if numpy recarray can take unicode data type.
    In Python 2, numpy doesn't allow unicode strings as field names in arrays
    even if they are ASCII-encodable. This function tests this directly.
"""
try:
np.ndarray((1,), dtype=[(u'X', float)])
except TypeError:
return False
return True
def _model_setstate_monkey_patch(self, state):
"""Monkey patch for Model.__setstate__ for restoring from older pickles"""
# restore the 'model' weakrefs on all components
self.__dict__.update(state)
# Set "tags" attribute for older, pickled models
self.__dict__.setdefault('tags', ComponentSet())
for c in self.all_components():
c.model = weakref.ref(self)
@contextmanager
def _patch_model_setstate():
old_setstate = Model.__setstate__
Model.__setstate__ = _model_setstate_monkey_patch
try:
yield
finally:
Model.__setstate__ = old_setstate
| bsd-2-clause |
pizzathief/scipy | scipy/spatial/_spherical_voronoi.py | 7 | 13695 | """
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
def calculate_solid_angles(R):
"""Calculates the solid angles of plane triangles. Implements the method of
Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
that input points have unit norm."""
# Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
# This is equal to the determinant of the matrix [R1 R2 R3], which can be
# computed with better stability.
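    # With unit-norm rows, the Van Oosterom-Strackee result reduces to
    #   tan(Omega / 2) = det([R1 R2 R3]) / (1 + R1.R2 + R2.R3 + R3.R1),
    # evaluated below for every (3, 3) triangle in the stack.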
numerator = np.linalg.det(R)
denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
return np.abs(2 * np.arctan2(numerator, denominator))
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, ndim)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (ndim,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, ndim)
the points in `ndim` dimensions to generate the Voronoi diagram from
radius : double
radius of the sphere
center : double array of shape (ndim,)
center of the sphere
vertices : double array of shape (nvertices, ndim)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Methods
    -------
calculate_areas
Calculates the areas of the Voronoi regions. For 2D point sets, the
regions are circular arcs. The sum of the areas is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
The Convex Hull neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement).
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
triangle. IEEE Transactions on Biomedical Engineering,
2, 1983, pp 125--126.
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi, geometric_slerp
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> t_vals = np.linspace(0, 1, 2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... n = len(region)
... for i in range(n):
... start = sv.vertices[region][i]
... end = sv.vertices[region][(i + 1) % n]
... result = geometric_slerp(start, end, t_vals)
... ax.plot(result[..., 0],
... result[..., 1],
... result[..., 2],
... c='k')
>>> ax.azim = 10
>>> ax.elev = 40
>>> _ = ax.set_xticks([])
>>> _ = ax.set_yticks([])
>>> _ = ax.set_zticks([])
>>> fig.set_size_inches(4, 4)
>>> plt.show()
"""
def __init__(self, points, radius=1, center=None, threshold=1e-06):
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
self.radius = float(radius)
self.points = np.array(points).astype(np.double)
self._dim = len(points[0])
if center is None:
self.center = np.zeros(self._dim)
else:
self.center = np.array(center, dtype=float)
# test degenerate input
self._rank = np.linalg.matrix_rank(self.points - self.points[0],
tol=threshold * self.radius)
if self._rank < self._dim:
raise ValueError("Rank of input points must be at least {0}".format(self._dim))
if cKDTree(self.points).query_pairs(threshold * self.radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(self.points - self.center, axis=1)
max_discrepancy = np.abs(radii - self.radius).max()
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# get Convex Hull
conv = scipy.spatial.ConvexHull(self.points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
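        # Each hull facet's outward unit normal (equations[:, :-1]) points
        # from the sphere center towards the circumcenter of that facet's
        # triangle, so scaling by the radius and adding the center gives the
        # Voronoi vertices on the sphere.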
self.vertices = self.radius * conv.equations[:, :-1] + self.center
self._simplices = conv.simplices
# calculate regions from triangulation
# for 3D input simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(len(self._simplices))
# for 3D input tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
# for 3D input point_indices will have shape: (6N-12,)
point_indices = self._simplices.ravel()
# for 3D input indices will have shape: (6N-12,)
indices = np.argsort(point_indices, kind='mergesort')
# for 3D input flattened_groups will have shape: (6N-12,)
flattened_groups = tri_indices[indices].astype(np.intp)
# intervals will have shape: (N+1,)
intervals = np.cumsum(np.bincount(point_indices + 1))
# split flattened groups to get nested list of unsorted regions
groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
for i in range(len(intervals) - 1)]
self.regions = groups
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Raises
------
TypeError
If the points are not three-dimensional.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the circumcenter of the k-th triangle
in self._simplices. For each region n, we choose the first triangle
(=Voronoi vertex) in self._simplices and a vertex of that triangle
not equal to the center n. These determine a unique neighbor of that
triangle, which is then chosen as the second triangle. The second
triangle will have a unique vertex not equal to the current vertex or
the center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
if self._dim != 3:
raise TypeError("Only supported for three-dimensional point sets")
_voronoi.sort_vertices_of_regions(self._simplices, self.regions)
def _calculate_areas_3d(self):
self.sort_vertices_of_regions()
sizes = [len(region) for region in self.regions]
csizes = np.cumsum(sizes)
num_regions = csizes[-1]
# We create a set of triangles consisting of one point and two Voronoi
# vertices. The vertices of each triangle are adjacent in the sorted
# regions list.
point_indices = [i for i, size in enumerate(sizes)
for j in range(size)]
nbrs1 = np.array([r for region in self.regions for r in region])
# The calculation of nbrs2 is a vectorized version of:
# np.array([r for region in self.regions for r in np.roll(region, 1)])
nbrs2 = np.roll(nbrs1, 1)
indices = np.roll(csizes, 1)
indices[0] = 0
nbrs2[indices] = nbrs1[csizes - 1]
# Normalize points and vertices.
pnormalized = (self.points - self.center) / self.radius
vnormalized = (self.vertices - self.center) / self.radius
# Create the complete set of triangles and calculate their solid angles
triangles = np.hstack([pnormalized[point_indices],
vnormalized[nbrs1],
vnormalized[nbrs2]
]).reshape((num_regions, 3, 3))
triangle_solid_angles = calculate_solid_angles(triangles)
# Sum the solid angles of the triangles in each region
solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
solid_angles[1:] -= solid_angles[:-1]
# Get polygon areas using A = omega * r**2
return solid_angles * self.radius**2
def _calculate_areas_2d(self):
# Find start and end points of arcs
arcs = self.points[self._simplices] - self.center
# Calculate the angle subtended by arcs
cosine = np.einsum('ij,ij->i', arcs[:, 0], arcs[:, 1])
sine = np.abs(np.linalg.det(arcs))
theta = np.arctan2(sine, cosine)
# Get areas using A = r * theta
areas = self.radius * theta
# Correct arcs which go the wrong way (single-hemisphere inputs)
signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
self.vertices - self.center))
indices = np.where(signs < 0)
areas[indices] = 2 * np.pi * self.radius - areas[indices]
return areas
def calculate_areas(self):
"""Calculates the areas of the Voronoi regions.
For 2D point sets, the regions are circular arcs. The sum of the areas
is `2 * pi * radius`.
For 3D point sets, the regions are spherical polygons. The sum of the
areas is `4 * pi * radius**2`.
.. versionadded:: 1.5.0
Returns
-------
areas : double array of shape (npoints,)
The areas of the Voronoi regions.
"""
if self._dim == 2:
return self._calculate_areas_2d()
elif self._dim == 3:
return self._calculate_areas_3d()
else:
raise TypeError("Only supported for 2D and 3D point sets")
| bsd-3-clause |
kclauw/Dueling_Network_Architectures | results/plot.py | 4 | 1501 | import glob, os,csv
import sys
import numpy as np
import matplotlib.pyplot as plt
def read_files(folder):
runs = []
os.chdir(folder)
for file in glob.glob("*.csv"):
with open(file) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
steps = []
values = []
row1 = next(readCSV) # First line is header
for i,row in enumerate(readCSV):
step = row[1]
value = row[2]
#steps.append(step)
if i < 100:
steps.append(step)
values.append(float(value))
runs.append(values)
return [runs,steps]
def set_plot(values,label,xlabel,ylabel):
plt.plot(values,label=label)
plt.xlabel(xlabel, fontsize=18)
plt.ylabel(ylabel, fontsize=18)
leg = plt.legend(loc=4,prop={'size':10},fontsize=30,shadow=True,markerscale=100)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(10.0)
def main():
y_label = sys.argv[2]
#averages = [float(sum(col))/len(col) for col in zip(*read_files(sys.argv[1]))]
runs = read_files(sys.argv[1])
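    # Average element-wise across runs: mean value over all runs at each step.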
average_runs = [float(sum(col))/len(col) for col in zip(*runs[0])]
print(average_runs)
plt.figure()
plt.plot(average_runs)
plt.xlabel("Steps", fontsize=18)
plt.ylabel(y_label, fontsize=18)
plt.show()
plt.savefig('plot1.png')
if __name__ == "__main__":
main()
| mit |
dwweiss/pmLib | src/Neural.py | 1 | 27362 | """
Copyright (c) 2016- by Dietmar W Weiss
This is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 3.0 of
the License, or (at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this software; if not, write to the Free
Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
02110-1301 USA, or see the FSF site: http://www.fsf.org.
Version:
2018-02-08 DWW
"""
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as plt
try:
import neurolab as nl
_hasNeurolab = True
except ImportError:
_hasNeurolab = False
print("??? Import from 'neurolab' failed")
try:
import genetic_algorithm as gea
_hasGenetic = True
except ImportError:
_hasGenetic = False
print('??? Gea not imported')
class NeurGen(object):
"""
Trains feed-forward network with genetic algorithm
"""
    def __init__(self, minmax, size, f=None):
        # accept f so the NeurGen(..., f=f) call in Neural.train() is valid
        self.f = f
self.trainf = None
self.errorf = None
def init(self):
pass
def train(self, X, Y, **kwargs):
"""
Trains feed-forward network with genetic algorithm
Args:
X (2D array_like of float):
training input
Y (2D array_like of float):
training target
kwargs (dict, optional):
                keyword arguments:
epochs (int):
max number of iterations of single trial,
default is 1000
errorf (function)
error function: { nl.error.MSE() | nl.error.SSE() },
default is MSE
f (method):
method f(x) from Hybrid trough Empirical,
default is None (f(x) = x)
goal (float):
limit of 'errorf' for stop of training (0 < goal < 1),
default is 1e-5
hidden (int or array_like of int):
array of number of neurons in hidden layers,
default is max(1, round(nPoint / (alpha * (nInp + nOut))))
outputf (function):
activation function of output layer,
default is TanSig()
plot (int):
control of plots showing training progress,
default is 0 (no plot)
regularization (float):
                control of regularization (the sum of all weights is added
                to the cost function of training), 0. <= regularization <= 1,
                default is 0 (no effect of the sum of all weights)
show (int):
control of information about training, if show=0: no print,
default is epochs // 10
silent (bool):
if True, no information is sent to console,
default is False
smartTrials (bool):
if False, perform all trials even if goal has been reached,
default is True
trainers (string or list of string):
space separated string
if 'all' or None, all training algorithms will be applied,
default is 'bfgs'
transf (function):
activation function of hidden layers,
default is TanSig()
trials (int):
maximum number of training trials,
default is 3
Returns:
errorHistory (array of float):
error from 'errorf' for each epoch
"""
# the only non-NeuroLab argument:
f = kwargs.get('f', None)
assert f is not None
self.f = f.__get__(self, self.__class__)
epochs = kwargs.get('epochs', 1000)
errorf = kwargs.get('errorf', nl.error.MSE())
goal = kwargs.get('goal', 1.0001e-5)
hidden = kwargs.get('hidden', None)
outputf = kwargs.get('outputf', None)
regularization = kwargs.get('rr', 1.0)
show = kwargs.get('show', None)
transf = kwargs.get('transf', None)
errorHistory = None
return errorHistory
def sim(self, x):
y = None
return y
class Neural(object):
"""
a) Wraps different neural network implementations from
1) Neurolab: trains exclusively with backpropagation
2) gea: trains exclusively with genetic algorithm
b) Compares different training algorithms and different regularisation
settings
c) Presents graphically history of norms for each trial
References:
- Recommended training algorithms:
'bfgs': Broyden–Fletcher–Goldfarb–Shanno algorithm,
see: scipy.optimize.fmin_bfgs()
ref: wikipedia: Broyden-Fletcher-Goldfarb-Shanno_algorithm
'rprop': resilient backpropagation (NO REGULARIZATION)
ref: wikipedia: Rprop
- http://neupy.com/docs/tutorials.html#tutorials
Proposal for number of hidden neurons:
nHidden = nPoint / ([2..10] * (nInp + nOut))
2 -> lowest risk of over-fitting
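        Example (illustrative numbers only): nPoint=200, nInp=3, nOut=1 and
        the conservative factor 2 give nHidden = 200 / (2 * (3 + 1)) = 25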
Installation of neurolab:
1) Fetch neurolab.0.3.5.tar.gz file (or newer)
2) Change to download directory
3) python -m pip install .\neurolab.0.3.5.tar.gz
"""
def __init__(self):
self._X = None # input
self._Y = None # target
self._net = None # network
self._norm_y = None # data from normalization of target
self._xKeys = None # xKeys for import from data frame
self._yKeys = None # yKeys for import from data frame
self._trainers = '' # list of training algorithms
self._bestTrainer = '' # best train algorithm
self._finalErrors = [] # final errors of best trial for
# each trainer in 'self._trainers'
self._finalL2norms = [] # final L2-norm of best trial for each trainer
self._bestEpochs = [] # epochs of best trial for each trainer
plt.rcParams.update({'font.size': 14})
plt.rcParams['legend.fontsize'] = 14
@property
def bestTrainer(self):
return self._bestTrainer
def importDataFrame(self, df, xKeys, yKeys):
"""
Args:
df (DataFrame):
data object
xKeys (list of string):
input keys for data selection
yKeys (list of string):
output keys for data selection
"""
self._xKeys = list(xKeys)
self._yKeys = list(yKeys)
        assert all(k in df for k in xKeys), 'unknown x-keys: ' + str(xKeys) + \
            ', valid keys: ' + str(list(df.columns))
        assert all(k in df for k in yKeys), 'unknown y-keys: ' + str(yKeys) + \
            ', valid keys: ' + str(list(df.columns))
self._X = np.asfarray(df.loc[:, xKeys])
self._Y = np.asfarray(df.loc[:, yKeys])
self._norm_y = nl.tool.Norm(self._Y)
self._Y = self._norm_y(self._Y)
def importArrays(self, X, Y, xKeys=None, yKeys=None):
"""
Args:
X (1D or 2D array_like of float):
X will be converted to 2D-array
(first index is data point index)
Y (1D or 2D array_like of float):
Y will be converted to 2D-array
(first index is data point index)
xKeys (1D array_like of string):
list of column keys for data selection
use self._xKeys keys if xKeys is None,
default: ['x0', 'x1', ... ]
yKeys (1D array_like of string):
list of column keys for data selection
use self._yKeys keys if yKeys is None,
default: ['y0', 'y1', ... ]
"""
#print('nn X.shape:', X.shape, 'Y.shape:', Y.shape)
self._X = np.atleast_2d(X)
self._Y = np.atleast_2d(Y)
#print('nn self.X.shape:',self._X.shape,'self.Y.shape:',self._Y.shape)
if self._X.shape[0] < self._X.shape[1]:
self._X = self._X.transpose()
if self._Y.shape[0] < self._Y.shape[1]:
self._Y = self._Y.transpose()
assert self._X.shape[0] == self._Y.shape[0], \
'input arrays incompatible [' + str(self._X.shape[0]) + \
'] vs. [' + str(self._Y.shape[0]) + ']\n' + \
'self._X: ' + str(self._X) + '\nself._Y: ' + str(self._Y)
if xKeys is None:
self._xKeys = ['x' + str(i) for i in range(self._X.shape[1])]
else:
self._xKeys = xKeys
if yKeys is None:
self._yKeys = ['y' + str(i) for i in range(self._Y.shape[1])]
else:
self._yKeys = yKeys
self._norm_y = nl.tool.Norm(self._Y)
self._Y = self._norm_y(self._Y)
def train(self, **kwargs):
"""
Args:
kwargs (dict, optional):
                keyword arguments:
epochs (int):
max number of iterations of single trial,
default is 1000
errorf (function)
error function: { nl.error.MSE() | nl.error.SSE() },
default is MSE
f (method):
method f(x) from Hybrid trough Empirical,
default is None (f(x) = x)
goal (float):
limit of 'errorf' for stop of training (0 < goal < 1),
default is 1e-5
hidden (int or array_like of int):
array of number of neurons in hidden layers,
default is max(1, round(nPoint / (alpha * (nInp + nOut))))
outputf (function):
activation function of output layer,
default is TanSig()
plot (int):
control of plots showing training progress,
default is 0 (no plot)
regularization (float):
                control of regularization (the sum of all weights is added
                to the cost function of training), 0. <= regularization <= 1,
                default is 0 (no effect of the sum of all weights)
show (int):
control of information about training, if show=0: no print,
default is epochs // 10
silent (bool):
if True, no information is sent to console,
default is False
smartTrials (bool):
if False, perform all trials even if goal has been reached,
default is True
trainers (string or list of string):
space separated string
if 'all' or None, all training algorithms will be applied,
default is 'bfgs'
transf (function):
activation function of hidden layers,
default is TanSig()
trials (int):
maximum number of training trials,
default is 3
Returns:
(error, trainer, epochs) for best training trial
Note:
The best network has been assigned to 'self._net' before return
"""
assert not(self._X is None or self._Y is None), 'call import*() first'
epochs = kwargs.get('epochs', 1000)
errorf = kwargs.get('errorf', nl.error.MSE())
f = kwargs.get('f', None)
goal = kwargs.get('goal', 1e-5)
hidden = kwargs.get('hidden', None)
outputf = kwargs.get('outputf', None)
plot = kwargs.get('plot', 1)
regularization = kwargs.get('regularization', 0.0)
show = kwargs.get('show', None)
silent = kwargs.get('silent', False)
smartTrials = kwargs.get('smartTrials', True)
trainers = kwargs.get('trainers', 'bfgs')
transf = kwargs.get('transf', None)
trials = kwargs.get('trials', 3)
if not trainers:
            trainers = 'all'
if isinstance(trainers, str):
if trainers == 'all':
trainers = 'cg gd gdx gdm gda rprop bfgs genetic'
trainers = trainers.split()
if f is not None:
            trainers = ['genetic']
self._trainers = list(set(trainers)) # remove redundancy
if not self._trainers:
            self._trainers = ['rprop', 'bfgs']
if errorf is None:
errorf = nl.error.MSE()
if show is None:
show = epochs // 10
if silent:
plot = False
if isinstance(hidden, (int, float)):
hidden = list([int(hidden)])
if hidden is None or len(hidden) == 0:
            alpha = 2  # 2..10; 2 usually suppresses over-fitting
nPoint = self._X.shape[0]
nInp, nOut = self._X.shape[1], self._Y.shape[1]
nHidden = max(1, round(nPoint / (alpha * (nInp + nOut))))
print("+++ auto def of 'nHidden': " + str(nHidden))
hidden = [nHidden]
if not isinstance(hidden, list):
hidden = list(hidden)
size = hidden.copy()
size.append(self._Y.shape[1])
assert size[-1] == self._Y.shape[1]
trainfDict = {'genetic': None,
'bfgs': nl.train.train_bfgs,
'cg': nl.train.train_cg,
'gd': nl.train.train_gd,
'gda': nl.train.train_gda,
'gdm': nl.train.train_gdm,
'gdx': nl.train.train_gdx,
'rprop': nl.train.train_rprop
}
assert all([x in trainfDict for x in self._trainers])
if not silent:
print('+++ trainers:', self._trainers)
sequenceError = float('inf')
self._bestTrainer = self._trainers[0]
self._finalErrors = []
self._finalL2norms = []
self._bestEpochs = []
for trainer in self._trainers:
trainf = trainfDict[trainer]
trainerErr = float('inf')
trainerEpochs = None
trainerL2norm = None
if trainer == 'genetic':
net = NeurGen(nl.tool.minmax(self._X), size, f=f)
else:
net = nl.net.newff(nl.tool.minmax(self._X), size)
net.trainf = trainf
net.errorf = errorf
for jTrial in range(trials):
net.init()
if trainer == 'rprop':
trialErrors = net.train(self._X, self._Y,
epochs=epochs,
show=show, goal=goal)
else:
trialErrors = net.train(self._X, self._Y,
epochs=epochs,
show=show, goal=goal,
rr=regularization)
if sequenceError > trialErrors[-1]:
sequenceError = trialErrors[-1]
del self._net
self._net = net.copy()
if (trainerErr < goal and trainerEpochs > len(trialErrors)) or\
(trainerErr >= goal and trainerErr > trialErrors[-1]):
trainerErr = trialErrors[-1]
trainerEpochs = len(trialErrors)
trainerL2norm = np.sqrt(np.mean(np.square(
self.__call__(self._X) - self._norm_y.renorm(self._Y))))
if plot:
plt.plot(range(len(trialErrors)), trialErrors,
label='trial: ' + str(jTrial))
if smartTrials:
if trialErrors[-1] < goal:
break
self._finalErrors.append(trainerErr)
self._finalL2norms.append(trainerL2norm)
self._bestEpochs.append(trainerEpochs)
iBest = self._trainers.index(self._bestTrainer)
if trainerErr < self._finalErrors[iBest]:
self._bestTrainer = trainer
if plot:
plt.title("'" + trainer + "' mse:" +
str(round(trainerErr*1e3, 2)) + 'e-3 L2:' +
str(round(trainerL2norm, 3)) +
' [' + str(trainerEpochs) + ']')
plt.xlabel('epochs')
plt.ylabel('error')
plt.yscale('log', nonposy='clip')
plt.legend(bbox_to_anchor=(1.1, 1), loc='upper left')
plt.grid()
plt.show()
if not silent:
print(' ' + trainer + ':' + str(round(trainerErr, 5)) +
'[' + str(trainerEpochs) + '], ')
if plot:
self.plotTestWithTrainData()
iBest = self._trainers.index(self._bestTrainer)
if not silent:
if len(self._trainers) > 1:
print(" best trainer: '" +
self._trainers[iBest] +
"' out of: [" + ' '.join(self._trainers) + '], error:',
round(self._finalErrors[iBest], 5))
if len(self._finalErrors) > 1:
print(" (trainer:err): [", end='')
s = ''
for trainer, err in zip(self._trainers, self._finalErrors):
s += trainer + ':' + str(round(err, 5)) + ' '
                print(s[:-1] + ']')
return self._finalErrors[iBest], self._trainers[iBest], \
self._bestEpochs[iBest]
def __call__(self, x=None, **kwargs):
return self.predict(x=x)
def predict(self, **kwargs):
x = kwargs.get('x', None)
if x is None:
x = self._X
assert x is not None, 'x is None'
assert self._net is not None, 'net is not trained'
x = np.asfarray(x)
if x.ndim == 1:
x = x.reshape(x.size, 1)
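        # neurolab's sim() expects one sample per row with self._net.ci input
        # columns; transpose if the array orientation is flipped.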
if x.shape[1] != self._net.ci:
x = np.transpose(x)
y = self._net.sim(x)
return self._norm_y.renorm(y)
def plotTestWithTrainData(self):
for trainer, error, epochs in zip(self._trainers, self._finalErrors,
self._bestEpochs):
y = self.__call__(self._X) # prediction
Y = self._norm_y.renorm(self._Y) # target
title = 'Train (' + trainer + ') mse: ' + \
str(round(error * 1e3, 2)) + 'e-3 [' + str(epochs) + ']'
plt.title(title)
for j, yTrainSub in enumerate(Y.T):
dy = np.subtract(y.T[j], yTrainSub)
for i, xTrainSub in enumerate(self._X.T):
label = self._xKeys[i] + ' & ' + self._yKeys[j]
plt.plot(xTrainSub, dy, label=label)
plt.xlabel('$x$')
plt.ylabel('$y_{pred} - y_{train}$')
plt.legend(bbox_to_anchor=(1.1, 1), loc='upper left')
plt.show()
plt.title(title)
for j, yTrainSub in enumerate(Y.T):
for i, xTrainSub in enumerate(self._X.T):
label = self._xKeys[i] + ' & ' + self._yKeys[j]
plt.plot(xTrainSub, y.T[j], label=label)
plt.plot(xTrainSub, yTrainSub, label=label +
' (target)', linestyle='', marker='*')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.legend(bbox_to_anchor=(1.1, 1), loc='upper left')
plt.show()
x = range(len(self._finalErrors))
y = self._finalErrors
y2 = np.asfarray(self._bestEpochs) * 1e-5
f = plt.figure()
ax = f.add_axes([.1, .1, .8, .8])
ax.plot(np.asfarray(x)+0.01, y2, color='b', label='epochs*1e-5')
ax.bar(x, y, align='center', color='r', label='MSE')
ax.set_xticks(x)
ax.set_xticklabels(self._trainers)
ax.set_yticks(np.add(y, y2))
plt.title('Final training errors')
plt.xlabel('trainer')
plt.ylabel('error')
plt.yscale('log', nonposy='clip')
plt.grid()
plt.legend(bbox_to_anchor=(1.1, 1), loc='upper left')
plt.show()
# Examples ####################################################################
if __name__ == '__main__':
ALL = 1
if 0 or ALL:
s = 'Example 1'
print('-' * len(s) + '\n' + s + '\n' + '-' * len(s))
X = np.linspace(-1.75 * np.pi, 1.75 * np.pi, 50)
Y = np.sin(X) * 10 + 0
net = Neural()
net.importArrays(X, Y)
# trainers: 'cg gd gdx gdm gda rprop bfgs'
net.train(hidden=[6], plot=1, epochs=500, goal=1e-5, trials=5,
trainers='cg gdx rprop bfgs', regularization=0.0, show=None)
dx = 0.5 * (X.max() - X.min())
x = np.linspace(X.min() - dx, X.max() + dx)
y = net(x)
        # L2 error at the training points (no targets exist for the test x)
        L2_norm = np.sqrt(np.mean(np.square(net(X)[:, 0] - Y)))
plt.title('Test (' + net._bestTrainer + ') L2: ' +
str(round(L2_norm, 2)))
plt.plot(x, y, '-', X, Y, '.')
plt.legend(['pred', 'targ', ])
plt.xlabel('x')
plt.ylabel('y(x)')
plt.show()
if 0 or ALL:
s = 'Example 2'
print('-' * len(s) + '\n' + s + '\n' + '-' * len(s))
df = DataFrame({'p0': [10, 20, 30, 40], 'p1': [11, 21, 31, 41],
'p2': [12, 22, 32, 42], 'r0': [31, 41, 51, 52],
'r1': [32, 42, 52, 55]})
xKeys = ['p0', 'p2']
yKeys = ['r0', 'r1']
net = Neural()
net.importDataFrame(df, xKeys, yKeys)
err = net.train(goal=1e-6, hidden=[10, 3], plot=1, epochs=2000,
trainers='cg gdx rprop bfgs', trials=10,
regularization=0.01, smartTrials=False)
if 0 or ALL:
s = 'Example 3'
print('-' * len(s) + '\n' + s + '\n' + '-' * len(s))
try:
from plotArrays import plotSurface, plotIsolines, plotIsoMap, \
plotWireframe
except ImportError:
print("??? import from 'plotArrays' failed")
X = [[10, 11], [11, 33], [33, 14], [37, 39], [20, 20]]
Y = [[10, 11], [12, 13], [35, 40], [58, 68], [22, 28]]
net = Neural()
net.importArrays(X, Y)
err = net.train(hidden=6, plot=1, epochs=1000, goal=1e-6,
trainers='cg gdx rprop bfgs', trials=5)
y = net(X)[:, 0]
X = np.asfarray(X)
Y = np.asfarray(Y)[:, 0]
dy = np.subtract(y, Y)
if X.shape[1] == 2:
plotWireframe(X[:, 0], X[:, 1], y, title='$y_{prd}$',
labels=['x', 'y', r'$Y_{targ}$'])
plotWireframe(X[:, 0], X[:, 1], Y, title='$Y_{trg}$',
labels=['x', 'y', r'$Y_{targ}$'])
plotWireframe(X[:, 0], X[:, 1], dy, title=r'$\Delta y$',
labels=['x', 'y', r'$\Delta y$'])
plotIsolines(X[:, 0], X[:, 1], y, title='$y_{prd}$')
plotIsoMap(X[:, 0], X[:, 1], y, title='$y_{prd}$')
plotIsoMap(X[:, 0], X[:, 1], Y, title='$Y_{trg}$')
plotIsolines(X[:, 0], X[:, 1], Y, title='$Y_{trg}$')
plotIsoMap(X[:, 0], X[:, 1], dy, title=r'$\Delta y$')
plotSurface(X[:, 0], X[:, 1], dy, title=r'$\Delta y$')
plotSurface(X[:, 0], X[:, 1], y, title='$y_{prd}$')
if 0 or ALL:
s = 'Example 4: newff and train without class Neural'
print('-' * len(s) + '\n' + s + '\n' + '-' * len(s))
X = np.atleast_2d(np.linspace(-7, 7, 20)).T
Y = np.sin(X) * 10
norm_y = nl.tool.Norm(Y)
YY = norm_y(Y)
net = nl.net.newff(nl.tool.minmax(X), [5, YY.shape[1]])
# net.trainf = nl.train.train_rprop # or:
net.trainf = nl.train.train_bfgs
error = net.train(X, YY, epochs=10000, show=100, goal=1e-6)
yTrain = norm_y.renorm(net.sim(X))
print(error[-1])
plt.subplot(211)
plt.plot(error)
plt.legend(['L2 error'])
plt.xlabel('Epoch number')
plt.ylabel('error (default SSE)')
xTest = np.atleast_2d(np.linspace(-5, 8, 150)).T
yTest = norm_y.renorm(net.sim(xTest)).ravel()
plt.subplot(212)
plt.plot(xTest, yTest, '-', X, Y, '.')
plt.legend(['pred', 'targ'])
plt.xlabel('x')
plt.ylabel('y(x)')
plt.show()
if 0 or ALL:
s = 'Example 5'
print('-' * len(s) + '\n' + s + '\n' + '-' * len(s))
try:
from plotArrays import plotSurface, plotIsolines, plotIsoMap
except ImportError:
print("??? import of 'plotArrays' failed")
X = np.atleast_2d(np.linspace(-2 * np.pi, 2 * np.pi, 50)).T
Y = np.sin(X) * 5
net = Neural()
net.importArrays(X, Y)
# trainers: 'cg gd gdx gdm gda rprop bfgs'
err = net.train(hidden=[8, 2], plot=1, epochs=2000, goal=1e-5,
trainers='rprop bfgs', trials=8)
y = net(X)
if X.shape[1] == 1:
plt.plot(X, y, label='pred')
plt.plot(X, Y, label='targ')
plt.legend()
plt.show()
elif X.shape[1] == 2:
plotSurface(X[:, 0], X[:, 1], y[:, 0], title='$y_{prd}$')
plotIsolines(X[:, 0], X[:, 1], y[:, 0], title='$y_{prd}$')
plotIsolines(X[:, 0], X[:, 1], Y[:, 0], title='$y_{trg}$')
dy = y - Y
if X.shape[1] == 2:
plotIsoMap(X[:, 0], X[:, 1], dy[:, 0],
title='$y_{prd} - y_{trg}$')
| lgpl-3.0 |
HolgerPeters/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                        err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test that a column-vector y raises a DataConversionWarning but still fits.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises when subsample == 1.0 (no OOB data).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for 1-10, then one every 10th iteration (20-100)
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
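# Note on the monitor protocol (descriptive comment, inferred from the usage
# below): ``fit`` calls ``monitor(i, self, locals())`` after each boosting
# stage and stops early as soon as the monitor returns True, so the monitor
# above halts training after 10 stages.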
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/examples/example_enhanced_boxplots.py | 33 | 3179 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Necessary to make horizontal axis labels fit
plt.rcParams['figure.subplot.bottom'] = 0.23
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
# Group age by party ID.
age = [data.exog['age'][data.endog == id] for id in party_ID]
# Create a violin plot.
fig = plt.figure()
ax = fig.add_subplot(111)
sm.graphics.violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a bean plot.
fig2 = plt.figure()
ax = fig2.add_subplot(111)
sm.graphics.beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a jitter plot.
fig3 = plt.figure()
ax = fig3.add_subplot(111)
plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small',
'label_rotation':30, 'violin_fc':(0.8, 0.8, 0.8),
'jitter_marker':'.', 'jitter_marker_size':3, 'bean_color':'#FF6F00',
'bean_mean_color':'#009D91'}
sm.graphics.beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create an asymmetrical jitter plot.
ix = data.exog['income'] < 16 # incomes < $30k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_lower_income = [age[endog == id] for id in party_ID]
ix = data.exog['income'] >= 20 # incomes > $50k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_higher_income = [age[endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts['violin_fc'] = (0.5, 0.5, 0.5)
plot_opts['bean_show_mean'] = False
plot_opts['bean_show_median'] = False
plot_opts['bean_legend_text'] = 'Income < \$30k'
plot_opts['cutoff_val'] = 10
sm.graphics.beanplot(age_lower_income, ax=ax, labels=labels, side='left',
jitter=True, plot_opts=plot_opts)
plot_opts['violin_fc'] = (0.7, 0.7, 0.7)
plot_opts['bean_color'] = '#009D91'
plot_opts['bean_legend_text'] = 'Income > \$50k'
sm.graphics.beanplot(age_higher_income, ax=ax, labels=labels, side='right',
jitter=True, plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Show all plots.
plt.show()
| bsd-3-clause |
davidgardenier/frbpoppy | tests/distance/repeaters_dm.py | 1 | 3376 | """Plot DM/SNR distributions of repeater populations."""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation, plot
from frbpoppy import split_pop, pprint, hist
from tests.convenience import plot_aa_style, rel_path
DAYS = 4
INTERACTIVE_PLOT = False
PLOTTING_LIMIT_N_SRCS = 0
SNR = False
r = CosmicPopulation.simple(n_srcs=int(1e5), n_days=DAYS, repeaters=True)
r.set_dist(z_max=0.01)
r.set_lum(model='powerlaw', low=1e35, high=1e45, power=-1.5,
per_source='different')
r.set_time(model='poisson', rate=3)
r.set_dm_igm(model='ioka', slope=1000, std=0)
r.set_dm(mw=False, igm=True, host=False)
r.set_w('constant', value=1)
r.generate()
# Set up survey
survey = Survey('perfect', n_days=DAYS)
survey.set_beam(model='perfect')
survey.snr_limit = 1e6
surv_pop = SurveyPopulation(r, survey)
pprint(f'{r.n_bursts()}:{surv_pop.n_bursts()}')
pprint(f'{surv_pop.n_sources()} sources detected')
if r.n_bursts() < PLOTTING_LIMIT_N_SRCS:
pprint('Not sufficient FRB sources for plotting')
exit()
# Split population into seemingly one-off and repeater populations
mask = ((~np.isnan(surv_pop.frbs.time)).sum(1) > 1)
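# The mask above counts the non-NaN entries in each source's (NaN-padded) row
# of burst times, so sources with more than one detected burst are flagged as
# apparent repeaters and split off below.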
pop_rep, pop_one = split_pop(surv_pop, mask)
pop_rep.name += ' (> 1 burst)'
pop_one.name += ' (1 burst)'
if INTERACTIVE_PLOT:
plot(r, pop_rep, pop_one, tns=False, mute=False)
# Plot dm distribution
if SNR:
plot_aa_style(cols=2)
f, (ax1, ax2) = plt.subplots(1, 2)
else:
plot_aa_style(cols=1)
f, ax1 = plt.subplots(1, 1)
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
pops = (r, pop_rep, pop_one)
for i, pop in enumerate(pops):
# Distinguish populations
if pop.name.endswith('(1 burst)'):
label = '1 burst'
linestyle = 'solid'
elif pop.name.endswith('(> 1 burst)'):
label = '$>$1 burst'
linestyle = 'dashed'
else:
label = 'cosmic'
linestyle = 'dashdot'
pprint(f'Number of bursts in {label}: {pop.n_bursts()}')
# Do stuff with data
dm = pop.frbs.dm
x, y = hist(dm)
x *= 200 # Normalise x-axis z=0.01, z=2
# Plot DM distributions
ax1.step(x, y, where='mid', linestyle=linestyle, label=label,
color=colors[i])
# Plot fluence distributions
snr = pop.frbs.snr
if snr is None:
continue
if not SNR:
continue
try:
ax2.step(*hist(snr, bin_type='log'), where='mid', linestyle=linestyle,
color=colors[i])
except ValueError:
pprint('Zero sources available to plot')
continue
ax1.set_xlabel(r'DM$_{\textrm{ex}}$ ($\textrm{pc}\ \textrm{cm}^{-3}$)')
ax1.set_ylabel('Fraction')
if SNR:
ax2.set_xlabel(r'SNR')
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.yaxis.tick_right()
plt.figlegend(loc='upper center', ncol=len(pops), framealpha=1)
else:
plt.figlegend(loc='upper center', ncol=3, framealpha=1, prop={'size': 8},
bbox_to_anchor=(0.5, 1.07), bbox_transform=ax1.transAxes)
# Test the difference between the distributions
rep = pops[1].frbs.dm[:20] # First 20 repeaters
one = pops[2].frbs.dm[:min([200, len(pops[2].frbs.dm)])] # First x one-offs
print(f'KS test: {ks_2samp(rep, one)}')
plt.tight_layout()
plt.savefig(rel_path('plots/rep_dm_dist.pdf'))
plt.clf()
| mit |
feiyanzhandui/tware | examples/mimic2/logreg.py | 2 | 7417 | import numpy as np
import sys
from sets import Set
#classifiers
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
#preprocess
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
#eval
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
#viz
from sklearn.metrics import roc_curve
import pylab as pl
from sklearn.metrics import confusion_matrix
def main():
diseases = [('infectious',1,140),
('metabolic',240,280),
('blood',280,290),
('neurologic',320,390),
('heart_hypertensive',401,406),
('heart_ischemic',410,415),
('heart_failure',428,429),
('pulmonary',460,520),
('digestive',520,580),
('renal_insufficiency',580,630)]
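    # Each tuple is (label, lo, hi); further down, a patient's ICD-9 code c is
    # assigned the label whose half-open range satisfies lo <= c < hi
    # (e.g. a code of 428.0 falls in [428, 429) and gets 'heart_failure').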
B = {d[0]: LogisticRegression(class_weight='auto', random_state=0) for d in diseases}
#B = {d[0]: RandomForestClassifier(n_estimators=100, max_features=None, n_jobs=-1, random_state=0) for d in diseases}
#classifiers = {d[0]:
#[#KNeighborsClassifier(3),
#SVC(kernel="linear", C=0.025),
#SVC(gamma=2, C=1),
#DecisionTreeClassifier(criterion='entropy',max_depth=4,min_samples_split=10,random_state=0)] for d in diseases}#,
#RandomForestClassifier(max_depth=10, n_estimators=100, max_features=1)] for d in diseases}#,
#AdaBoostClassifier()] for d in diseases}#,
#GaussianNB(),
#LDA(),
#QDA()]for d in diseases}
#RandomForestClassifier(n_estimators=200, max_features=None, n_jobs=-1, random_state=0)] for d in diseases}
#GradientBoostingClassifier(n_estimators=1000, max_depth=None, max_features=None)] for d in diseases}
#Pipeline([('feature_selection', LinearSVC(penalty='l1', dual=False)),
# ('classification', GaussianNB())])] for d in diseases}
pts = {}
for line in open(sys.argv[1]):
raw = line.split(",")
try:
icd9 = float(raw[1])
pt = raw[0]
age = float(raw[3])
if age >= 18:
if pt not in pts:
x = [float(a) for a in raw[2:]]
pts[pt] = (x, set())
for d in diseases:
if icd9 >= d[1] and icd9 < d[2]:
pts[pt][1].add(d[0])
break
except:
pass
X = []
Y = {d[0]: [] for d in diseases}
for pt,(feat,codes) in sorted(pts.items()):
X.append(feat)
for d in diseases:
if d[0] in codes:
Y[d[0]].append(1)
else:
Y[d[0]].append(0)
#scale
#X = MinMaxScaler().fit_transform(X)
#X = StandardScaler().fit_transform(X)
#i = 0
#while (True):
#Y_pred = {}
for disease,y in Y.items():
print '********* DISEASE: ' + disease
X_train,X_test,Y_train,Y_test = train_test_split(X, y, test_size=0.3,
random_state=0)
b = B[disease]
b.fit(X_train, Y_train)
y_pred = b.predict(X_test)
#Y_pred[disease] = y_pred
print classification_report(Y_test, y_pred)
#for d1,y_h1 in Y_pred.items():
# for d2,y_h2 in Y_pred.items():
# print d1, d2, np.corrcoef(y_h1, y_h2)[0][1]
#print confusion_matrix(Y_test, y_pred)
#Y_prob = b.predict_proba(X_test)
#print Y_prob
#Y_pred = [1 if y_prob[1] > 0.4 else 0 for y_prob in Y_prob]
#fpr,tpr,thresh = roc_curve(Y_test, Y_prob[:,1])
#pl.plot(fpr, tpr, lw=1, label='roc')
#pl.plot([0, 1], [0, 1], 'k--')
#pl.xlim([0.0, 1.0])
#pl.ylim([0.0, 1.0])
#pl.xlabel('FPR')
#pl.ylabel('TPR')
#pl.savefig('prefilter.pdf', format='pdf')
#cc = {}
#for d1,y_h1 in Y_h.items():
# for d2,y_h2 in Y_h.items():
# if d1 != d2:
# cc[(d1,d2)] = np.corrcoef(y_h1, y_h2)[0][1]
#maxcc = sorted(cc.items(), key=lambda x: x[1], reverse=True)[0]
#print '********** MAXCC= ', maxcc
#d1 = maxcc[0][0]
#d2 = maxcc[0][1]
#d = d1 + ',' + d2
#y1 = Y[d1]
#y2 = Y[d2]
#y = [y1[i] or y2[i] for i in range(len(y1))]
#del Y[d1]
#del Y[d2]
#Y[d] = y
#del B[d1]
#del B[d2]
#B[d] = RandomForestClassifier(n_estimators=100, max_features=None, n_jobs=100, random_state=0)
#i += 1
#if i > 3:
# break
#with open(k + '.dot', 'w') as f:
# f = tree.export_graphviz(B, out_file=f)
#cm = confusion_matrix(Y_test, B.predict(X_test))
#print cm
#visualize(cm, 'lr', k)
#visualize(confusion_matrix(Y, K_lr), "lr")
#X10 = SelectPercentile(chi2, percentile=10).fit_transform(X, Y)
#tree
#print 'Computing tree...'
#B_tree = {k: DecisionTreeClassifier() for k in categories}
#for k,v in B_tree.items():
# for train,test in StratifiedKFold(Y[k], 3):
# X_train = [X[i] for i in train]
# Y_train = [Y[k][i] for i in train]
# X_test = [X[i] for i in test]
# Y_test = [Y[k][i] for i in test]
# v.fit(X_train, Y_train)
# cm = confusion_matrix(Y_test, v.predict(X_test))
# print k + ':'
# print cm
#print "Computing tree..."
#B_tree = {k: DecisionTreeClassifier() for k in categories}
#for k,v in B_tree.items():
# v.fit(X, Y[k])
# print confusion_matrix(Y[k], v.predict(X))
#B_tree = DecisionTreeClassifier()
#B_tree.fit(X, Y)
#K_tree = B_tree.predict(X)
#visualize(confusion_matrix(Y, K_tree), "tree")
#with open(d + '.dot', 'w') as f:
# f = tree.export_graphviz(b, out_file=f)
# cc = {}
# for d1,y_h1 in Y_h.items():
# for d2,y_h2 in Y_h.items():
# if d1 != d2:
# cc[(d1,d2)] = np.corrcoef(y_h1, y_h2)[0][1]
# maxcc = sorted(cc.items(), key=lambda x: x[1], reverse=True)[0]
# print '********** MAXCC= ', maxcc
# d1 = maxcc[0][0]
# d2 = maxcc[0][1]
# d = d1 + ',' + d2
# y1 = Y[d1]
# y2 = Y[d2]
# y = [y1[i] or y2[i] for i in range(len(y1))]
# del Y[d1]
# del Y[d2]
# Y[d] = y
# del B[d1]
# del B[d2]
# B[d] = LogisticRegression(random_state=0)
#i += 1
#if i > 3:
# break
#with open(k + '.dot', 'w') as f:
# f = tree.export_graphviz(B, out_file=f)
#cm = confusion_matrix(Y_test, B.predict(X_test))
#print cm
#visualize(cm, 'lr', k)
#visualize(confusion_matrix(Y, K_lr), "lr")
#X10 = SelectPercentile(chi2, percentile=10).fit_transform(X, Y)
#B_tree = {k: DecisionTreeClassifier() for k in categories}
#for k,v in B_tree.items():
# v.fit(X, Y[k])
# print confusion_matrix(Y[k], v.predict(X))
#B_tree = DecisionTreeClassifier()
#B_tree.fit(X, Y)
#K_tree = B_tree.predict(X)
#visualize(confusion_matrix(Y, K_tree), "tree")
if __name__ == "__main__":
main()
| apache-2.0 |
rexshihaoren/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
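# A minimal numeric sketch of the binomial deviance above, assuming labels in
# {0, 1} and raw scores (log odds) as ``pred``.  Since np.logaddexp(0, pred)
# equals log(1 + exp(pred)), the expression is twice the negative
# log-likelihood of a logistic model.  Illustrative only.
def _example_binomial_deviance():
    import numpy as np
    y = np.array([0.0, 1.0, 1.0])
    pred = np.array([-1.2, 0.3, 2.0])  # raw scores, not probabilities
    return -2.0 * np.mean(y * pred - np.logaddexp(0.0, pred))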
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
        else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
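# A small sketch of the exponential (AdaBoost) loss used by ExponentialLoss
# above: labels y in {0, 1} are mapped to {-1, +1} via (2 * y - 1) and the
# unweighted loss is mean(exp(-(2 * y - 1) * pred)).  Illustrative only.
def _example_exponential_loss():
    import numpy as np
    y = np.array([0.0, 1.0, 1.0])
    pred = np.array([-0.5, 0.2, 1.0])
    return np.mean(np.exp(-(2.0 * y - 1.0) * pred))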
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1``, output is printed once in a while (when iteration mod
    verbose_mod is zero); if ``verbose`` is larger than 1, output is printed
    for each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
            raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression).
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
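# A hedged usage sketch for GradientBoostingClassifier, assuming this module
# is importable as sklearn.ensemble and that sklearn.datasets provides
# make_hastie_10_2; the exact score will vary with the data and parameters.
def _example_classifier_usage():
    from sklearn.datasets import make_hastie_10_2
    from sklearn.ensemble import GradientBoostingClassifier
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                     max_depth=1, random_state=0)
    clf.fit(X[:800], y[:800])
    return clf.score(X[800:], y[800:])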
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
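# A hedged usage sketch for GradientBoostingRegressor, assuming the estimator
# is importable as sklearn.ensemble.GradientBoostingRegressor; the synthetic
# data and parameters are illustrative only.
def _example_regressor_usage():
    import numpy as np
    from sklearn.ensemble import GradientBoostingRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)
    est = GradientBoostingRegressor(loss='ls', n_estimators=100, max_depth=2,
                                    random_state=0)
    est.fit(X[:150], y[:150])
    return est.predict(X[150:])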
| bsd-3-clause |
jettisonjoe/openhtf | openhtf/core/measurements.py | 1 | 24982 | # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measurements for OpenHTF.
Measurements in OpenHTF are used to represent values collected during a Test.
They can be numeric or string values, and can be configured such that the
OpenHTF framework will automatically check them against Pass/Fail criteria.
Measurements should not be used for large binary blobs, which are instead best
stored as Attachments (see attachments.py).
Measurements are described by the measurements.Measurement class. Essentially,
the Measurement class is used by test authors to declare measurements by name,
and to optionally provide unit, type, and validation information. Measurements
are attached to Test Phases using the @measurements.measures() decorator.
When measurements are output by the OpenHTF framework, the Measurement objects
are serialized into the 'measurements' field on the PhaseRecord, which contains
the descriptor fields, the outcome (PASS/FAIL/UNSET), and the values themselves.
Validation of undimensioned measurements happens when they are set, so that
users of the HTTP API can see PASS/FAIL outcome on those measurements
immediately after they are set. Multidimensional measurements, however,
don't usually make sense to validate until all data is available, so they
instead enter a PARTIALLY_SET outcome state until the end of the test phase,
at which point they are validated and become PASS or FAIL. Note that
validators of dimensioned measurements are only called at the end of the phase
if at least one value was set in the multidimensional measurement, otherwise it
remains UNSET, so that outcome fields for all measurements may be PASS, FAIL,
or UNSET.
# TODO(madsci): Make validators.py example.
See examples/validators.py for some examples on how to define and use custom
measurement validators.
Examples:
@measurements.measures(
measurements.Measurement(
'number_widgets').in_range(5, 10).doc(
'''This phase parameter tracks the number of widgets.'''))
@measurements.measures(
*(measurements.Measurement('level_%s' % lvl)
for lvl in ('none', 'some', 'all')))
def WidgetTestPhase(test):
test.measurements.number_widgets = 5
test.measurements.level_none = 10
"""
import collections
import logging
from enum import Enum
import mutablerecords
from openhtf import util
from openhtf.core import phase_descriptor
from openhtf.util import data
from openhtf.util import validators
from openhtf.util import units
import six
try:
import pandas
except ImportError:
pandas = None
_LOG = logging.getLogger(__name__)
class InvalidDimensionsError(Exception):
"""Raised when there is a problem with measurement dimensions."""
class InvalidMeasurementType(Exception):
"""Raised when an unexpected measurement type is given."""
class MeasurementNotSetError(Exception):
"""Raised when a measurement is accessed that hasn't been set."""
class NotAMeasurementError(Exception):
"""Raised if an invalid measurement name is accessed."""
class DuplicateNameError(Exception):
"""An exception which occurs when a measurement name collision occurs."""
# Only multidimensional measurements can be 'PARTIALLY_SET', and can never be in
# that state after their respective phase has completed (they must transition to
# either PASS or FAIL at that point).
Outcome = Enum('Outcome', ['PASS', 'FAIL', 'UNSET', 'PARTIALLY_SET'])
class Measurement( # pylint: disable=no-init
mutablerecords.Record(
'Measurement', ['name'],
{'units': None, 'dimensions': None, 'docstring': None,
'_notification_cb': None,
'validators': list,
'outcome': Outcome.UNSET,
'measured_value': None,
'_cached': None})):
"""Record encapsulating descriptive data for a measurement.
This record includes an _asdict() method so it can be easily output. Output
is as you would expect, a dict mapping non-None fields to their values
(validators are stringified with str()).
Attributes:
name: Name of the measurement.
docstring: Optional string describing this measurement.
units: UOM code of the units for the measurement being taken.
dimensions: Tuple of UOM codes for units of dimensions.
validators: List of callable validator objects to perform pass/fail checks.
outcome: One of the Outcome() enumeration values, starting at UNSET.
measured_value: An instance of MeasuredValue or DimensionedMeasuredValue
containing the value(s) of this Measurement that have been set, if any.
_cached: A cached dict representation of this measurement created initially
during as_base_types and updated in place to save allocation time.
"""
def __init__(self, name, **kwargs):
super(Measurement, self).__init__(name, **kwargs)
if 'measured_value' not in kwargs:
self._initialize_value()
def _initialize_value(self):
if self.measured_value and self.measured_value.is_value_set:
raise ValueError('Cannot update a Measurement once a value is set.')
if self.dimensions:
self.measured_value = DimensionedMeasuredValue(
self.name, len(self.dimensions))
else:
self.measured_value = MeasuredValue(self.name)
def __setattr__(self, attr, value):
super(Measurement, self).__setattr__(attr, value)
# When dimensions changes, we may need to update our measured_value type.
if attr == 'dimensions':
self._initialize_value()
def __setstate__(self, state):
"""Set this record's state during unpickling.
    This override is necessary to ensure that the _initialize_value check
is skipped during unpickling.
"""
dimensions = state.pop('dimensions')
super(Measurement, self).__setstate__(state)
object.__setattr__(self, 'dimensions', dimensions)
def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self
def notify_value_set(self):
if self.dimensions:
self.outcome = Outcome.PARTIALLY_SET
else:
self.validate()
if self._notification_cb:
self._notification_cb()
def doc(self, docstring):
"""Set this Measurement's docstring, returns self for chaining."""
self.docstring = docstring
return self
def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc
def _maybe_make_dimension(self, dimension):
"""Return a `measurements.Dimension` instance."""
# For backwards compatibility the argument can be either a Dimension, a
# string or a `units.UnitDescriptor`.
if isinstance(dimension, Dimension):
return dimension
if isinstance(dimension, units.UnitDescriptor):
return Dimension.from_unit_descriptor(dimension)
if isinstance(dimension, str):
return Dimension.from_string(dimension)
raise TypeError('Cannot convert %s to a dimension', dimension)
def with_units(self, unit_desc):
"""Declare the units for this Measurement, returns self for chaining."""
self.units = self._maybe_make_unit_desc(unit_desc)
return self
def with_dimensions(self, *dimensions):
"""Declare dimensions for this Measurement, returns self for chaining."""
self.dimensions = tuple(
self._maybe_make_dimension(dim) for dim in dimensions)
self._cached = None
return self
def with_validator(self, validator):
"""Add a validator callback to this Measurement, chainable."""
if not callable(validator):
raise ValueError('Validator must be callable', validator)
self.validators.append(validator)
self._cached = None
return self
def with_args(self, **kwargs):
"""String substitution for names and docstrings."""
validators = [
validator.with_args(**kwargs)
if hasattr(validator, 'with_args') else validator
for validator in self.validators
]
return mutablerecords.CopyRecord(
self, name=util.format_string(self.name, kwargs),
docstring=util.format_string(self.docstring, kwargs),
validators=validators,
_cached=None,
)
def __getattr__(self, attr): # pylint: disable=invalid-name
"""Support our default set of validators as direct attributes."""
# Don't provide a back door to validators.py private stuff accidentally.
if attr.startswith('_') or not validators.has_validator(attr):
raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
# Create a wrapper to invoke the attribute from within validators.
def _with_validator(*args, **kwargs): # pylint: disable=invalid-name
return self.with_validator(
validators.create_validator(attr, *args, **kwargs))
return _with_validator
def validate(self):
"""Validate this measurement and update its 'outcome' field."""
# PASS if all our validators return True, otherwise FAIL.
try:
if all(v(self.measured_value.value) for v in self.validators):
self.outcome = Outcome.PASS
else:
self.outcome = Outcome.FAIL
return self
except Exception as e: # pylint: disable=bare-except
_LOG.error('Validation for measurement %s raised an exception %s.',
self.name, e)
self.outcome = Outcome.FAIL
raise
finally:
if self._cached:
self._cached['outcome'] = self.outcome.name
def as_base_types(self):
"""Convert this measurement to a dict of basic types."""
if not self._cached:
      # Create the cached dict the first time this is called.
self._cached = {
'name': self.name,
'outcome': self.outcome.name,
}
if self.validators:
self._cached['validators'] = data.convert_to_base_types(
tuple(str(v) for v in self.validators))
if self.dimensions:
self._cached['dimensions'] = data.convert_to_base_types(self.dimensions)
if self.units:
self._cached['units'] = data.convert_to_base_types(self.units)
if self.docstring:
self._cached['docstring'] = self.docstring
if self.measured_value.is_value_set:
self._cached['measured_value'] = self.measured_value.basetype_value()
return self._cached
def to_dataframe(self, columns=None):
"""Convert a multi-dim to a pandas dataframe."""
if not isinstance(self.measured_value, DimensionedMeasuredValue):
raise TypeError(
'Only a dimensioned measurement can be converted to a DataFrame')
if columns is None:
columns = [d.name for d in self.dimensions]
columns += [self.units.name if self.units else 'value']
dataframe = self.measured_value.to_dataframe(columns)
return dataframe
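# A hedged sketch of declaring a Measurement outside of a phase decorator:
# units, a range validator, and a docstring are chained onto the declaration.
# ``in_range`` is resolved through ``__getattr__`` above, and DEGREE_CELSIUS
# is assumed to be defined in openhtf.util.units.  Illustrative only.
def _example_measurement_declaration():
  return Measurement('widget_temperature').with_units(
      units.DEGREE_CELSIUS).in_range(0, 100).doc(
      'Temperature of the widget under test.')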
class MeasuredValue(
mutablerecords.Record('MeasuredValue', ['name'],
{'stored_value': None, 'is_value_set': False,
'_cached_value': None})):
"""Class encapsulating actual values measured.
Note that this is really just a value wrapper with some sanity checks. See
Declaration for the class that handles the descriptive aspect of the
measurement. This class is the type that Collection actually stores in
its _values attribute.
This class stores values for un-dimensioned (single-value) measurements, for
dimensioned values, see the DimensionedMeasuredValue. The interfaces are very
similar, but differ slightly; the important part is the get_value() interface
on both of them.
The _cached_value is the base type represention of the stored_value when that
is set.
"""
def __str__(self):
return str(self.value) if self.is_value_set else 'UNSET'
def __eq__(self, other):
return (type(self) == type(other) and self.name == other.name and
self.is_value_set == other.is_value_set and
self.stored_value == other.stored_value)
def __ne__(self, other):
return not self.__eq__(other)
@property
def value(self):
if not self.is_value_set:
raise MeasurementNotSetError('Measurement not yet set', self.name)
return self.stored_value
def basetype_value(self):
return self._cached_value
def set(self, value):
"""Set the value for this measurement, with some sanity checks."""
if self.is_value_set:
# While we want to *allow* re-setting previously set measurements, we'd
# rather promote the use of multidimensional measurements instead of
# discarding data, so we make this somewhat chatty.
_LOG.warning(
'Overriding previous measurement %s value of %s with %s, the old '
'value will be lost. Use a dimensioned measurement if you need to '
'save multiple values.', self.name, self.stored_value, value)
if value is None:
_LOG.warning('Measurement %s is set to None', self.name)
self.stored_value = value
self._cached_value = data.convert_to_base_types(value)
self.is_value_set = True
class Dimension(object):
"""Dimension for multi-dim Measurements.
Dimensions optionally include a unit and a description. This is intended
as a drop-in replacement for UnitDescriptor for backwards compatibility.
"""
__slots__ = ['_description', '_unit', '_cached_dict']
def __init__(self, description='', unit=units.NO_DIMENSION):
self._description = description
self._unit = unit
self._cached_dict = data.convert_to_base_types({
'code': self.code,
'description': self.description,
'name': self.name,
'suffix': self.suffix,
})
def __eq__(self, other):
return (self.description == other.description and self.unit == other.unit)
def __ne__(self, other):
return not self == other
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._asdict())
@classmethod
def from_unit_descriptor(cls, unit_desc):
return cls(unit=unit_desc)
@classmethod
def from_string(cls, string):
"""Convert a string into a Dimension"""
# Note: There is some ambiguity as to whether the string passed is intended
# to become a unit looked up by name or suffix, or a Dimension descriptor.
if string in units.UNITS_BY_ALL:
return cls(description=string, unit=units.Unit(string))
else:
return cls(description=string)
@property
def description(self):
return self._description
@property
def unit(self):
return self._unit
@property
def code(self):
"""Provides backwards compatibility to `units.UnitDescriptor` api."""
return self._unit.code
@property
def suffix(self):
"""Provides backwards compatibility to `units.UnitDescriptor` api."""
return self._unit.suffix
@property
def name(self):
"""Provides backwards compatibility to `units.UnitDescriptor` api."""
return self._description or self._unit.name
def _asdict(self):
return self._cached_dict
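# A hedged sketch of the three ways a Dimension can be constructed (from a
# units.UnitDescriptor, from a plain string, or directly), mirroring
# Measurement._maybe_make_dimension above.  HERTZ is assumed to be defined in
# openhtf.util.units.  Illustrative only.
def _example_dimension_construction():
  from_unit = Dimension.from_unit_descriptor(units.HERTZ)
  from_string = Dimension.from_string('frequency_sweep_index')
  explicit = Dimension(description='bin', unit=units.NO_DIMENSION)
  return from_unit, from_string, explicit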
class DimensionedMeasuredValue(mutablerecords.Record(
'DimensionedMeasuredValue', ['name', 'num_dimensions'],
{'notify_value_set': None,
'value_dict': collections.OrderedDict,
'_cached_basetype_values': list})):
"""Class encapsulating actual values measured.
See the MeasuredValue class docstring for more info. This class provides a
dict-like interface for indexing into dimensioned measurements.
The _cached_basetype_values is a cached list of the dimensioned entries in
order of being set. Each list entry is a tuple that is composed of the key,
then the value. This is set to None if a previous measurement is overridden;
in such a case, the list is fully reconstructed on the next call to
basetype_value.
"""
def __str__(self):
return str(self.value) if self.is_value_set else 'UNSET'
def with_notify(self, notify_value_set):
self.notify_value_set = notify_value_set
return self
@property
def is_value_set(self):
return len(self.value_dict) > 0
def __iter__(self): # pylint: disable=invalid-name
"""Iterate over items, allows easy conversion to a dict."""
return iter(six.iteritems(self.value_dict))
def __setitem__(self, coordinates, value): # pylint: disable=invalid-name
coordinates_len = len(coordinates) if hasattr(coordinates, '__len__') else 1
if coordinates_len != self.num_dimensions:
raise InvalidDimensionsError(
'Expected %s-dimensional coordinates, got %s' % (self.num_dimensions,
coordinates_len))
# Wrap single dimensions in a tuple so we can assume value_dict keys are
# always tuples later.
if self.num_dimensions == 1:
coordinates = (coordinates,)
if coordinates in self.value_dict:
_LOG.warning(
'Overriding previous measurement %s[%s] value of %s with %s',
self.name, coordinates, self.value_dict[coordinates], value)
self._cached_basetype_values = None
elif self._cached_basetype_values is not None:
self._cached_basetype_values.append(data.convert_to_base_types(
coordinates + (value,)))
self.value_dict[coordinates] = value
if self.notify_value_set:
self.notify_value_set()
def __getitem__(self, coordinates): # pylint: disable=invalid-name
# Wrap single dimensions in a tuple so we can assume value_dict keys are
# always tuples later.
if self.num_dimensions == 1:
coordinates = (coordinates,)
return self.value_dict[coordinates]
@property
def value(self):
"""The values stored in this record.
Returns:
A list of tuples; the last element of each tuple will be the measured
value, the other elements will be the associated coordinates. The tuples
are output in the order in which they were set.
"""
if not self.is_value_set:
raise MeasurementNotSetError('Measurement not yet set', self.name)
return [dimensions + (value,) for dimensions, value in
six.iteritems(self.value_dict)]
def basetype_value(self):
if self._cached_basetype_values is None:
self._cached_basetype_values = list(
data.convert_to_base_types(coordinates + (value,))
for coordinates, value in six.iteritems(self.value_dict))
return self._cached_basetype_values
def to_dataframe(self, columns=None):
"""Converts to a `pandas.DataFrame`"""
if not self.is_value_set:
raise ValueError('Value must be set before converting to a DataFrame.')
if not pandas:
raise RuntimeError('Install pandas to convert to pandas.DataFrame')
return pandas.DataFrame.from_records(self.value, columns=columns)
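# A hedged sketch of filling in and reading back a DimensionedMeasuredValue:
# coordinates index the value, ``value`` returns (coordinate..., value) tuples
# in insertion order, and ``to_dataframe`` requires pandas to be installed.
# Illustrative only; column names are arbitrary.
def _example_dimensioned_value():
  dim_value = DimensionedMeasuredValue('freq_response', 1)  # one dimension
  dim_value[5] = 10
  dim_value[6] = 11
  rows = dim_value.value  # [(5, 10), (6, 11)]
  frame = dim_value.to_dataframe(columns=['hertz', 'response'])
  return rows, frame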
class Collection(mutablerecords.Record('Collection', ['_measurements'])):
"""Encapsulates a collection of measurements.
This collection can have measurement values retrieved and set via getters and
setters that provide attribute and dict-like interfaces.
A Collection is created with a list of Measurement objects (defined above).
Measurements can't be added after initialization, only accessed and set.
MeasuredValue values can be set as attributes (see below). They can also be
read as attributes, but you get a DimensionedMeasuredValue object back if the
measurement accessed is dimensioned (this is how setting of dimensioned
measurements works, and so is unavoidable).
Iterating over a Collection results in (key, value) tuples of only set
measurements and their values. As such, a Collection can be converted to
a dict if you want to see all of a dimensioned measurement's values.
Alternatively, DimensionedMeasuredValue objects can also be converted directly
to dicts with dict(), as they also support an __iter__() interface.
This class is intended for use only internally within the OpenHTF framework.
Example:
from openhtf.util import measurements
from openhtf.util.units import UOM
self.measurements = measurements.Collection([
measurements.Measurement('widget_height'),
measurements.Measurement('widget_freq_response').with_dimensions(
UOM['HERTZ'])])
self.measurements.widget_height = 3
    print(self.measurements.widget_height) # 3
self.measurements.widget_freq_response[5] = 10
    print(self.measurements.widget_freq_response[5]) # 10
self.measurements.widget_freq_response[6] = 11
    print(dict(self.measurements.widget_freq_response))
# {5: 10, 6: 11}
# Not recommended, but you can also do this. This is intended only for
# framework internal use when generating the output test record.
    print(dict(self.measurements)['widget_freq_response'])
# [(5, 10), (6, 11)]
"""
def _assert_valid_key(self, name):
"""Raises if name is not a valid measurement."""
if name not in self._measurements:
raise NotAMeasurementError('Not a measurement', name, self._measurements)
def __iter__(self): # pylint: disable=invalid-name
"""Extract each MeasurementValue's value."""
return ((key, meas.measured_value.value)
for key, meas in six.iteritems(self._measurements))
def __setattr__(self, name, value): # pylint: disable=invalid-name
self[name] = value
def __getattr__(self, name): # pylint: disable=invalid-name
return self[name]
def __setitem__(self, name, value): # pylint: disable=invalid-name
self._assert_valid_key(name)
if self._measurements[name].dimensions:
raise InvalidDimensionsError(
'Cannot set dimensioned measurement without indices')
self._measurements[name].measured_value.set(value)
self._measurements[name].notify_value_set()
def __getitem__(self, name): # pylint: disable=invalid-name
self._assert_valid_key(name)
if self._measurements[name].dimensions:
return self._measurements[name].measured_value.with_notify(
self._measurements[name].notify_value_set)
# Return the MeasuredValue's value, MeasuredValue will raise if not set.
return self._measurements[name].measured_value.value
def measures(*measurements, **kwargs):
"""Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase.
"""
def _maybe_make(meas):
"""Turn strings into Measurement objects if necessary."""
if isinstance(meas, Measurement):
return meas
elif isinstance(meas, six.string_types):
return Measurement(meas, **kwargs)
raise InvalidMeasurementType('Expected Measurement or string', meas)
# In case we're declaring a measurement inline, we can only declare one.
if kwargs and len(measurements) != 1:
raise InvalidMeasurementType(
'If @measures kwargs are provided, a single measurement name must be '
'provided as a positional arg first.')
# Unlikely, but let's make sure we don't allow overriding initial outcome.
if 'outcome' in kwargs:
raise ValueError('Cannot specify outcome in measurement declaration!')
measurements = [_maybe_make(meas) for meas in measurements]
# 'measurements' is guaranteed to be a list of Measurement objects here.
def decorate(wrapped_phase):
"""Phase decorator to be returned."""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
duplicate_names = (set(m.name for m in measurements) &
set(m.name for m in phase.measurements))
if duplicate_names:
raise DuplicateNameError('Measurement names duplicated', duplicate_names)
phase.measurements.extend(measurements)
return phase
return decorate
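# Illustrative sketch of how the decorator is typically applied (phase and
# measurement names here are hypothetical):
#
#   @measures('widget_height')
#   @measures(Measurement('widget_freq_response').with_dimensions(UOM['HERTZ']))
#   def my_phase(test):
#     test.measurements.widget_height = 3
#
# Strings are converted to Measurement objects by _maybe_make(), and the
# resulting measurements are appended to the wrapped phase descriptor.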
| apache-2.0 |
flightgong/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, as a result,
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is shown in the boxplot, as suggested by Wilson and Hilferty [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
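# Side note (illustrative, not needed for the plots below): the robust fit
# should land much closer to the generating covariance than the MLE does,
# which can be checked e.g. via the Frobenius norm of the error:
#   err_mle = np.linalg.norm(emp_cov.covariance_ - gen_cov)
#   err_mcd = np.linalg.norm(robust_cov.covariance_ - gen_cov)
# For this contaminated sample, err_mcd is typically much smaller than err_mle.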
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
DiCarloLab-Delft/PycQED_py3 | pycqed/analysis/tools/plotting.py | 1 | 25541 | '''
Contains the plotting tools portion of the analysis toolbox.
Note: There is an equivalent file for analysis v2, include your new code there,
unless it is only intended for analysis v1.
'''
import lmfit
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
import numpy as np
import matplotlib.colors as col
import hsluv
from scipy.interpolate import interp1d
from matplotlib.patches import Rectangle, ConnectionPatch
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
single_col_figsize = (3.39, golden_mean*3.39)
double_col_figsize = (6.9, golden_mean*6.9)
thesis_col_figsize = (12.2/2.54, golden_mean*12.2/2.54)
def set_xlabel(axis, label, unit=None, latexify_ticks=False, **kw):
"""
Add a unit aware x-label to an axis object.
Args:
axis: matplotlib axis object to set label on
label: the desired label
unit: the unit
**kw : keyword argument to be passed to matplotlib.set_xlabel
"""
if unit is not None and unit != '':
xticks = axis.get_xticks()
scale_factor, unit = SI_prefix_and_scale_factor(
val=max(abs(xticks)), unit=unit)
tick_str = '{:.4g}' if not latexify_ticks else r'${:.4g}$'
formatter = matplotlib.ticker.FuncFormatter(
lambda x, pos: tick_str.format(x * scale_factor))
axis.xaxis.set_major_formatter(formatter)
axis.set_xlabel(label + ' ({})'.format(unit), **kw)
else:
axis.set_xlabel(label, **kw)
return axis
def set_ylabel(axis, label, unit=None, latexify_ticks=False, **kw):
"""
Add a unit aware y-label to an axis object.
Args:
axis: matplotlib axis object to set label on
label: the desired label
unit: the unit
**kw : keyword argument to be passed to matplotlib.set_ylabel
"""
if unit is not None and unit != '':
yticks = axis.get_yticks()
scale_factor, unit = SI_prefix_and_scale_factor(
val=max(abs(yticks)), unit=unit)
tick_str = '{:.6g}' if not latexify_ticks else r'${:.6g}$'
formatter = matplotlib.ticker.FuncFormatter(
lambda x, pos: tick_str.format(x * scale_factor))
axis.yaxis.set_major_formatter(formatter)
axis.set_ylabel(label + ' ({})'.format(unit), **kw)
else:
axis.set_ylabel(label, **kw)
return axis
def set_cbarlabel(cbar, label, unit=None, **kw):
"""
Add a unit aware z-label to a colorbar object
Args:
cbar: colorbar object to set label on
label: the desired label
unit: the unit
**kw : keyword argument to be passed to cbar.set_label
"""
if unit is not None and unit != '':
zticks = cbar.get_ticks()
scale_factor, unit = SI_prefix_and_scale_factor(
val=max(abs(zticks)), unit=unit)
cbar.set_ticks(zticks)
cbar.set_ticklabels(zticks*scale_factor)
cbar.set_label(label + ' ({})'.format(unit))
else:
cbar.set_label(label, **kw)
return cbar
SI_PREFIXES = dict(zip(range(-24, 25, 3), 'yzafpnμm kMGTPEZY'))
SI_PREFIXES[0] = ""
# N.B. not all of these are SI units, however, all of these support SI prefixes
SI_UNITS = r'm,s,g,W,J,V,A,F,T,Hz,Ohm,S,N,C,px,b,B,K,Bar,Vpeak,Vpp,Vp,Vrms,$\Phi_0$,A/s'.split(
    ',')
def SI_prefix_and_scale_factor(val, unit=None):
"""
Takes in a value and unit and if applicable returns the proper
scale factor and SI prefix.
Args:
val (float) : the value
unit (str) : the unit of the value
returns:
scale_factor (float) : scale_factor needed to convert value
unit (str) : unit including the prefix
"""
if unit in SI_UNITS:
try:
with np.errstate(all="ignore"):
prefix_power = np.log10(abs(val))//3 * 3
prefix = SI_PREFIXES[prefix_power]
# Greek symbols not supported in tex
if plt.rcParams['text.usetex'] and prefix == 'μ':
prefix = r'$\mu$'
return 10 ** -prefix_power, prefix + unit
except (KeyError, TypeError):
pass
return 1, unit if unit is not None else ""
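# Illustrative examples of the helper above (expected outputs of the current
# implementation):
#   SI_prefix_and_scale_factor(2e-9, 's') -> (1e9, 'ns')
#   SI_prefix_and_scale_factor(0.2, 'V') -> (1e3, 'mV')
#   SI_prefix_and_scale_factor(3.0, 'foo') -> (1, 'foo')  # unknown unit: no prefix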
def SI_val_to_msg_str(val: float, unit: str=None, return_type=str):
"""
Takes in a value with optional unit and returns a string tuple consisting
of (value_str, unit) where the value and unit are rescaled according to
SI prefixes, IF the unit is an SI unit (according to the comprehensive list
of SI units in this file ;).
the value_str is of the type specified in return_type (str) by default.
"""
sc, new_unit = SI_prefix_and_scale_factor(val, unit)
try:
new_val = sc*val
except TypeError:
return return_type(val), unit
return return_type(new_val), new_unit
def format_lmfit_par(par_name: str, lmfit_par, end_char=''):
"""Format an lmfit par to a string of value with uncertainty."""
val_string = par_name
val_string += ': {:.4f}'.format(lmfit_par.value)
if lmfit_par.stderr is not None:
val_string += r'$\pm$' + '{:.4f}'.format(lmfit_par.stderr)
else:
val_string += r'$\pm$' + 'NaN'
val_string += end_char
return val_string
def data_to_table_png(data: list, filename: str, title: str='',
close_fig: bool=True):
"""
    Takes in a list of lists containing the data to be
put in a table and saves this as a png.
"""
# Determine the shape of the table
nrows, ncols = np.shape(data)
hcell, wcell = 0.3, 2.
hpad, wpad = 0.5, 0
fig = plt.figure(figsize=(ncols*wcell+wpad, nrows*hcell+hpad))
ax = fig.add_subplot(111)
ax.axis('off')
# make the table
table = ax.table(cellText=data,
loc='center')
# rescale to make it more readable
table.scale(1, 1.5)
ax.set_title(title)
fig.tight_layout()
plt.savefig(filename, dpi=450)
if close_fig:
plt.close(fig)
def annotate_point_pair(ax, text, xy_start, xy_end, xycoords='data',
text_offset=(-10, -5), arrowprops=None, **kw):
'''
Annotates two points by connecting them with an arrow.
The annotation text is placed near the center of the arrow.
Function copied from "http://stackoverflow.com/questions/14612637/
plotting-distance-arrows-in-technical-drawing/32522399#32522399"
    Modified by Adriaan to allow specifying the offset of the text in two directions.
'''
if arrowprops is None:
arrowprops = dict(arrowstyle='<->')
assert isinstance(text, str)
xy_text = ((xy_start[0] + xy_end[0])/2., (xy_start[1] + xy_end[1])/2.)
arrow_vector = xy_end[0]-xy_start[0] + (xy_end[1] - xy_start[1]) * 1j
arrow_angle = np.angle(arrow_vector)
text_angle = arrow_angle - 0.5*np.pi
ax.annotate(
'', xy=xy_end, xycoords=xycoords,
xytext=xy_start, textcoords=xycoords,
arrowprops=arrowprops, **kw)
label = ax.annotate(
text,
xy=xy_text,
xycoords=xycoords,
xytext=(text_offset[0] * np.cos(text_angle) +
text_offset[1] * np.sin(text_angle),
text_offset[0] * np.sin(text_angle) +
text_offset[1] * np.cos(text_angle)),
textcoords='offset points', **kw)
return label
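# Illustrative usage (axis and coordinates are hypothetical):
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   annotate_point_pair(ax, '1.4 a.u.', xy_start=(0, 0), xy_end=(1, 1))
# This draws a double-headed arrow between the two points and places the label
# near the midpoint of the arrow, offset by text_offset.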
def get_color_order(i, max_num, cmap='viridis'):
# take a blue to red scale from 0 to max_num
# uses HSV system, H_red = 0, H_green = 1/3 H_blue=2/3
# return colors.hsv_to_rgb(2.*float(i)/(float(max_num)*3.), 1., 1.)
print('It is recommended to use the updated function "get_color_cycle".')
if isinstance(cmap, str):
cmap = cm.get_cmap(cmap)
return cmap((i/max_num) % 1)
def get_color_from_cmap(i, max_num):
pass
def plot_lmfit_res(fit_res, ax, plot_init: bool=False,
plot_numpoints: int=1000,
plot_kw: dict ={}, plot_init_kw: dict = {}, **kw):
"""
Plot the result of an lmfit optimization.
Args:
fit_res: lmfit result object.
ax: matplotlib axis object to plot on.
plot_init: if True plots the initial guess of the fit.
plot_numpoints: number of points to use for interpolating the fit.
plot_kw: dictionary of options to pass to the plot of the fit.
plot_init_kw dictionary of options to pass to the plot of the
initial guess.
**kw **kwargs, unused only here to match call signature.
Return:
axis : Returns matplotlib axis object on which the plot
was performed.
"""
if hasattr(fit_res, 'model'):
model = fit_res.model
# Testing input
if not (isinstance(model, lmfit.model.Model) or
isinstance(model, lmfit.model.ModelResult)):
raise TypeError(
'The passed item in "fit_res" needs to be'
' a fitting model, but is {}'.format(type(model)))
if len(model.independent_vars) == 1:
independent_var = model.independent_vars[0]
else:
raise ValueError('Fit can only be plotted if the model function'
' has one independent variable.')
x_arr = fit_res.userkws[independent_var]
xvals = np.linspace(np.min(x_arr), np.max(x_arr),
plot_numpoints)
yvals = model.eval(fit_res.params,
**{independent_var: xvals})
if plot_init:
yvals_init = model.eval(fit_res.init_params,
**{independent_var: xvals})
else: # case for the minimizer fit
# testing input
fit_xvals = fit_res.userkws
if len(fit_xvals.keys()) == 1:
independent_var = list(fit_xvals.keys())[0]
else:
raise ValueError('Fit can only be plotted if the model function'
' has one independent variable.')
x_arr = fit_res.userkws[independent_var]
xvals = np.linspace(np.min(x_arr), np.max(x_arr),
plot_numpoints)
fit_fn = fit_res.fit_fn
yvals = fit_fn(**fit_res.params,
**{independent_var: xvals})
if plot_init:
yvals_init = fit_fn(**fit_res.init_params,
**{independent_var: xvals})
    # actual plotting
ax.plot(xvals, yvals, **plot_kw)
if plot_init:
ax.plot(xvals, yvals_init, **plot_init_kw)
return ax
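# Illustrative usage sketch (assumes lmfit's built-in LinearModel and arrays
# x_data/y_data that are not defined in this module):
#   model = lmfit.models.LinearModel()
#   fit_res = model.fit(y_data, x=x_data)
#   fig, ax = plt.subplots()
#   ax.plot(x_data, y_data, '.')
#   plot_lmfit_res(fit_res, ax, plot_init=True,
#                  plot_kw={'color': 'C1'}, plot_init_kw={'ls': '--'})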
def flex_color_plot_vs_x(xvals, yvals, zvals, ax=None,
xwidth=None,
normalize=False, log=False,
save_name=None,
cmap='viridis',
clim=[None, None],
alpha=1,
**kw):
"""
Display a color figure for something like a tracked DAC sweep.
xvals should be a single vector with values for the primary sweep.
yvals and zvals should be a list of arrays with the sweep points and
measured values.
"""
# create a figure and set of axes
if ax is None:
fig = plt.figure(figsize=(12, 7))
ax = fig.add_subplot(111)
# calculate coordinates for corners of color blocks
# x coordinates
if xwidth is None:
xvals = np.array(xvals)
xvertices = np.zeros(np.array(xvals.shape)+1)
dx = abs(np.max(xvals)-np.min(xvals))/len(xvals)
xvertices[1:-1] = (xvals[:-1]+xvals[1:])/2.
xvertices[0] = xvals[0] - dx/2
xvertices[-1] = xvals[-1] + dx/2
else:
xvertices = []
for xval in xvals:
xvertices.append(xval+np.array([-0.5, 0.5])*xwidth)
# y coordinates
yvertices = []
for xx in range(len(xvals)):
# Important to sort arguments in case unsorted (e.g., FFT freqs)
sorted_yarguments = yvals[xx].argsort()
yvals[xx] = yvals[xx][sorted_yarguments]
zvals[xx] = zvals[xx][sorted_yarguments]
yvertices.append(np.zeros(np.array(yvals[xx].shape)+1))
yvertices[xx][1:-1] = (yvals[xx][:-1]+yvals[xx][1:])/2.
yvertices[xx][0] = yvals[xx][0] - (yvals[xx][1]-yvals[xx][0])/2
yvertices[xx][-1] = yvals[xx][-1] + (yvals[xx][-1]-yvals[xx][-2])/2
# normalized plot
if normalize:
zvals[xx] /= np.mean(zvals[xx])
# logarithmic plot
if log:
zvals[xx] = np.log(zvals[xx])/np.log(10)
# add blocks to plot
colormaps = []
for xx in range(len(xvals)):
tempzvals = np.array(
[np.append(zvals[xx], np.array(0)),
np.append(zvals[xx], np.array(0))]).transpose()
if xwidth is None:
colormaps.append(ax.pcolor(xvertices[xx:xx+2],
yvertices[xx],
tempzvals,
cmap=cmap, vmin=clim[0], vmax=clim[1],
alpha=alpha))
else:
colormaps.append(
ax.pcolor(xvertices[xx], yvertices[xx], tempzvals, cmap=cmap,
alpha=alpha))
return {'fig': ax.figure, 'ax': ax,
'cmap': colormaps[0], 'cmaps': colormaps}
def flex_colormesh_plot_vs_xy(xvals, yvals, zvals, ax=None,
normalize=False, log=False,
save_name=None, **kw):
"""
Add a rectangular block to a color plot using pcolormesh.
xvals and yvals should be single vectors with values for the
two sweep points.
zvals should be a list of arrays with the measured values with shape
(len(yvals), len(xvals)).
**grid-orientation**
The grid orientation for the zvals is the same as is used in
ax.pcolormesh.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y.
This can be counterintuitive: zvals(y_idx, x_idx)
and can be inconsistent with some arrays of zvals
(such as a 2D histogram from numpy).
"""
xvals = np.array(xvals)
yvals = np.array(yvals)
# First, we need to sort the data as otherwise we get odd plotting
    # artefacts. An example is plotting a Fourier transform.
sorted_x_arguments = xvals.argsort()
xvals = xvals[sorted_x_arguments]
sorted_y_arguments = yvals.argsort()
yvals = yvals[sorted_y_arguments]
zvals = zvals[:, sorted_x_arguments]
zvals = zvals[sorted_y_arguments, :]
# create a figure and set of axes
if ax is None:
fig = plt.figure(figsize=(12, 7))
ax = fig.add_subplot(111)
# convert xvals and yvals to single dimension arrays
xvals = np.squeeze(np.array(xvals))
yvals = np.squeeze(np.array(yvals))
# calculate coordinates for corners of color blocks
# x coordinates
xvertices = np.zeros(np.array(xvals.shape)+1)
xvertices[1:-1] = (xvals[:-1]+xvals[1:])/2.
xvertices[0] = xvals[0] - (xvals[1]-xvals[0])/2
xvertices[-1] = xvals[-1] + (xvals[-1]-xvals[-2])/2
# y coordinates
yvertices = np.zeros(np.array(yvals.shape)+1)
yvertices[1:-1] = (yvals[:-1]+yvals[1:])/2.
yvertices[0] = yvals[0] - (yvals[1]-yvals[0])/2
yvertices[-1] = yvals[-1] + (yvals[-1]-yvals[-2])/2
xgrid, ygrid = np.meshgrid(xvertices, yvertices)
# various plot options
# define colormap
cmap = plt.get_cmap(kw.pop('cmap', 'viridis'))
clim = kw.pop('clim', [None, None])
# normalized plot
if normalize:
zvals /= np.mean(zvals, axis=0)
# logarithmic plot
if log:
for xx in range(len(xvals)):
zvals[xx] = np.log(zvals[xx])/np.log(10)
# add blocks to plot
do_transpose = kw.pop('transpose', False)
if do_transpose:
colormap = ax.pcolormesh(ygrid.transpose(),
xgrid.transpose(),
zvals.transpose(),
cmap=cmap, vmin=clim[0], vmax=clim[1])
else:
colormap = ax.pcolormesh(xgrid, ygrid, zvals, cmap=cmap,
vmin=clim[0], vmax=clim[1])
return {'fig': ax.figure, 'ax': ax, 'cmap': colormap}
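# Illustrative usage sketch (arrays are hypothetical). Note the grid
# orientation documented above: zvals is indexed as zvals[y_idx, x_idx].
#   xvals = np.linspace(0, 1, 11)
#   yvals = np.linspace(0, 2, 21)
#   zvals = np.random.rand(len(yvals), len(xvals))
#   res = flex_colormesh_plot_vs_xy(xvals, yvals, zvals)
#   res['fig'].colorbar(res['cmap'], ax=res['ax'])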
def autolabel_barplot(ax, rects, rotation=90):
"""
    Attach a text label to each bar displaying its height.
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 0.5*height,
'%.2f' % (height),
ha='center', va='bottom', rotation=rotation)
def set_axeslabel_color(ax, color):
'''
Ad hoc function to set the labels, ticks, ticklabels and title to a color.
This is useful when e.g., making a presentation on a dark background
'''
ax.tick_params(color=color, which='both') # both major and minor ticks
plt.setp(ax.get_xticklabels(), color=color)
plt.setp(ax.get_yticklabels(), color=color)
plt.setp(ax.yaxis.get_label(), color=color)
plt.setp(ax.xaxis.get_label(), color=color)
plt.setp(ax.title, color=color)
# generate custom colormaps
# Inspired by
# https://stackoverflow.com/questions/23712207/cyclic-colormap-without-visual-distortions-for-use-in-phase-angle-plots
def make_segmented_cmap():
white = '#ffffff'
black = '#000000'
red = '#ff0000'
blue = '#0000ff'
anglemap = col.LinearSegmentedColormap.from_list(
'anglemap', [black, red, white, blue, black], N=256, gamma=1)
return anglemap
def make_anglemap_colorlist(N=256, use_hpl=True):
hue = np.ones(N) # hue
hue[:N // 2] = 11.6 # red
hue[N // 2:] = 258.6 # blue
s = 100 # saturation
lum = np.linspace(0, 100, N // 2) # luminosity
lum = np.hstack((lum, lum[::-1]))
colorlist = np.zeros((N, 3))
for ii in range(N):
if use_hpl:
colorlist[ii, :] = hsluv.hpluv_to_rgb((hue[ii], s, lum[ii]))
else:
colorlist[ii, :] = hsluv.hsluv_to_rgb((hue[ii], s, lum[ii]))
colorlist[colorlist > 1] = 1 # correct numeric errors
colorlist[colorlist < 0] = 0
return colorlist
def make_anglemap(N=256, use_hpl=True):
colorlist = make_anglemap_colorlist(N=N, use_hpl=use_hpl)
return col.ListedColormap(colorlist)
hsluv_anglemap = make_anglemap(use_hpl=False)
def circ_interp(x, y_deg, kind='linear'):
phases = np.deg2rad(y_deg)
newdata_cos = np.cos(phases)
newdata_sin = np.sin(phases)
ip_cos = interp1d(x, newdata_cos, kind=kind)
ip_sin = interp1d(x, newdata_sin, kind=kind)
return lambda interp_at: np.rad2deg(np.arctan2(ip_sin(interp_at), ip_cos(interp_at))) % 360
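# Illustrative example: circular interpolation avoids the 0/360 degree wrap.
# Naively interpolating 350 deg and 10 deg would give 180 deg halfway, whereas
#   f = circ_interp([0.0, 1.0], [350.0, 10.0])
#   f(0.5)
# returns a value at (or numerically indistinguishable from) 0 deg modulo 360.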
def make_anglemap45_colorlist(N=256, use_hpl=True):
col_space = 'hpluv' if use_hpl else 'hsluv'
colspace_to_rgb = getattr(hsluv, col_space + '_to_rgb')
rgb_to_colspace = getattr(hsluv, 'rgb_to_' + col_space)
black = [0., 0., 0.]
blue = [0.34, 0.86, 0.70]
violet = [0.34, 0.34, 0.86]
magenta = [0.86, 0.34, 0.86]
pink = [1.00, 0.90, 0.92]
red = [0.86, 0.34, 0.34]
yellow = [0.86, 0.86, 0.34]
green = [0.34, 0.86, 0.34]
rgb_list = [
black,
blue,
violet,
magenta,
pink,
red,
yellow,
green,
black
]
col_pos = np.linspace(0, 1, 9)
[hsl_hue, hsl_sat, hsl_lum] = np.array([rgb_to_colspace(np.array(rgb_col)) for rgb_col in rgb_list]).T
f_circ_interp = circ_interp(col_pos, hsl_hue)
f_hsl_sat = interp1d(col_pos, hsl_sat, kind='linear')
f_hsl_lum = interp1d(col_pos, hsl_lum, kind='linear')
pnts = np.linspace(0, 1, N)
new_col = [
f_circ_interp(pnts),
np.clip(f_hsl_sat(pnts), a_min=0, a_max=100),
np.clip(f_hsl_lum(pnts), a_min=0, a_max=100)
]
new_col = np.array([colspace_to_rgb(np.array(rgb_col)) for rgb_col in np.array(new_col).T])
new_col[new_col < 0] = 0
new_col[new_col > 1] = 1
return new_col
def make_anglemap45(N=256, use_hpl=True):
colorlist = make_anglemap45_colorlist(N=N, use_hpl=use_hpl)
return col.ListedColormap(colorlist)
hsluv_anglemap45 = make_anglemap45(use_hpl=False)
def plot_fit(xvals, fit_res, ax, **plot_kws):
"""
Evaluates a fit result at specified values to plot the fit.
"""
model = fit_res.model
independent_var = model.independent_vars[0]
yvals = model.eval(fit_res.params, **{independent_var: xvals})
ax.plot(xvals, yvals, **plot_kws)
def cmap_to_alpha(cmap):
"""
    Takes a cmap and returns a copy whose transparency (alpha) increases
    linearly from fully transparent to fully opaque across its elements.
"""
my_cmap = cmap(np.arange(cmap.N))
# Set alpha
my_cmap[:, -1] = np.linspace(0, 1, cmap.N)
# Create new colormap
my_cmap = col.ListedColormap(my_cmap)
return my_cmap
def cmap_first_to_alpha(cmap):
"""
    Makes the first element of a cmap transparent.
"""
my_cmap = cmap(np.arange(cmap.N))
# Set alpha
my_cmap[0, -1] = 0
my_cmap[1:, -1] = 1
# Create new colormap
my_cmap = col.ListedColormap(my_cmap)
return my_cmap
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1, 2])
if fig_width is None:
fig_width = 3.39 if columns == 1 else 6.9 # width in inches
if fig_height is None:
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
        print("WARNING: fig_height too large: {}, "
              "so will reduce to {} inches.".format(fig_height,
                                                    MAX_HEIGHT_INCHES))
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
'text.latex.preamble': [r'\usepackage{gensymb}'],
'axes.labelsize': 8, # fontsize for x and y labels (was 10)
'axes.titlesize': 8,
# 'text.fontsize': 8, # was 10
'legend.fontsize': 8, # was 10
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def connected_zoombox(ax0, ins_ax,
corner_a=(1, 1), corner_b=(2, 2),
square_kws={}, line_kws={}):
"""
Create a rectangle in ax0 corresponding to the ins_ax and connect corners.
Parameters
----------
ax0 : matplotlib axis
The parent axis on which to draw the square and connecting lines.
ins_ax : matplotlib axis
The inset axis. The limits of this axis are taken to determine the
location of the square.
corner_a : tuple of ints
Tuple of location codes used to determine what corners to connect.
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
"""
x_ins = ins_ax.get_xlim()
y_ins = ins_ax.get_ylim()
# xy coordinates corresponding to counterclockwise locations.
# this order is chosen to be consistent with ax.legend()
xy1 = (x_ins[1], y_ins[1]) # upper right
xy2 = (x_ins[0], y_ins[1]) # upper left
xy3 = (x_ins[0], y_ins[0]) # lower left
xy4 = (x_ins[1], y_ins[0]) # lower right
xy_corners = [xy1, xy2, xy3, xy4]
# ensures we have sensible defaults that can be overwritten
def_line_kws = dict(
color='grey',
arrowstyle='-', zorder=0, lw=1.5, ls=':')
def_line_kws.update(line_kws)
conA = ConnectionPatch(xy_corners[corner_a[0]-1],
xy_corners[corner_a[1]-1],
'data', 'data',
axesA=ins_ax, axesB=ax0, **def_line_kws)
ins_ax.add_artist(conA)
conB = ConnectionPatch(xy_corners[corner_b[0]-1],
xy_corners[corner_b[1]-1],
'data', 'data',
axesA=ins_ax, axesB=ax0, **def_line_kws)
ins_ax.add_artist(conB)
def_sq_kws = dict(ec='k', lw=0.5, fill=0, zorder=4)
def_sq_kws.update(square_kws)
rect = Rectangle((x_ins[0], y_ins[0]),
x_ins[1]-x_ins[0], y_ins[1]-y_ins[0],
**def_sq_kws)
ax0.add_patch(rect)
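# Illustrative usage sketch (data and inset placement are hypothetical):
#   fig, ax = plt.subplots()
#   ax.plot(x, y)
#   ins = ax.inset_axes([0.55, 0.55, 0.4, 0.4])  # matplotlib >= 3.0
#   ins.plot(x, y)
#   ins.set_xlim(1.0, 2.0)
#   ins.set_ylim(0.0, 0.5)
#   connected_zoombox(ax, ins, corner_a=(2, 2), corner_b=(4, 4))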
def restore_default_plot_params():
"""
Restore the matplotlib rcParams to their default values
"""
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
| mit |
pbreach/pysd | tests/test_utils.py | 2 | 2986 | """ Utilities for aiding in testing.
Not tests of utilities... That could be confusing."""
import pysd
import numpy as np
import pandas as pd
import os.path
def runner(model_file):
directory = os.path.dirname(model_file)
# load model
if model_file.endswith('.mdl'):
model = pysd.read_vensim(model_file)
elif model_file.endswith(".xmile"):
model = pysd.read_xmile(model_file)
else:
raise AttributeError('Modelfile should be *.mdl or *.xmile')
# load canonical output
try:
canon = pd.read_csv(directory + '/output.csv', index_col='Time')
except IOError:
try:
canon = pd.read_table(directory + '/output.tab', index_col='Time')
except IOError:
raise IOError('Canonical output file not found')
# run model
output = model.run(return_columns=canon.columns)
return output, canon
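# Illustrative usage (the model path below is hypothetical):
#   output, canon = runner('test-models/samples/teacup/teacup.mdl')
#   assert_frames_close(output, canon, rtol=0.05)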
def assert_frames_close(actual, expected, **kwargs):
"""
Compare DataFrame items by column and
raise AssertionError if any column is not equal.
Ordering of columns is unimportant, items are compared only by label.
NaN and infinite values are supported.
Parameters
----------
actual: pandas.DataFrame
expected: pandas.DataFrame
kwargs:
Examples
--------
>>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
... pd.DataFrame(100, index=range(5), columns=range(3)))
>>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
... pd.DataFrame(110, index=range(5), columns=range(3)),
... rtol=.2)
>>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
... pd.DataFrame(150, index=range(5), columns=range(3)),
... rtol=.2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AssertionError:
...
References
----------
Derived from: http://nbviewer.jupyter.org/gist/jiffyclub/ac2e7506428d5e1d587b
"""
assert (isinstance(actual, pd.DataFrame) and
isinstance(expected, pd.DataFrame)), \
'Inputs must both be pandas DataFrames.'
assert set(expected.columns) == set(actual.columns), \
'test set columns must be equal to those in actual/observed set.'
assert (expected.index.values == actual.index.values).all(), \
        'test set and actual set must share a common index, ' \
        'instead found {} vs {}'.format(expected.index.values,
                                        actual.index.values)
for col in expected.columns:
try:
assert_allclose(expected[col].values,
actual[col].values,
**kwargs)
except AssertionError as e:
raise AssertionError('Column: ' + str(col) + ' is not close.')
def assert_allclose(x, y, rtol=1.e-5, atol=1.e-5):
assert np.all(np.less_equal(abs(x-y), atol + rtol * abs(y))) | mit |
eggplantbren/DNest4 | code/Templates/Builder/nzunemployment.py | 1 | 2737 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import DNest4's builder
import dnest4.builder as bd
# Load the data and make a dictionary out of it
nzunemployment = pd.read_csv("nzunemployment.csv")
data = {}
data["N"] = nzunemployment.shape[0]
data["t"] = np.array(nzunemployment["q"])
data["adult"] = np.array(nzunemployment["adult"])
data["youth"] = np.array(nzunemployment["youth"])
# Convert to logits of unemployment rates
data["adult"] *= 0.01
data["adult"] = np.log(data["adult"]/(1.0 - data["adult"]))
data["youth"] *= 0.01
data["youth"] = np.log(data["youth"]/(1.0 - data["youth"]))
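# (Illustrative check of the logit transform above: a 5% unemployment rate maps
# to log(0.05/0.95) ~ -2.94, consistent with the [-4, 0] y-range used below.)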
# Plot the data
plt.plot(data["t"], data["adult"], "ko-", label="Adult")
plt.plot(data["t"], data["youth"], "go-", label="Youth")
plt.xlabel("Time (quarters)")
plt.ylabel("Unemployment rate (logit)")
plt.legend()
plt.xlim([data["t"].min() - 0.5, data["t"].max() + 0.5])
plt.ylim([-4.0, 0.0])
plt.show()
# A model (of the prior information!)
model = bd.Model()
# AR(1) parameters for adult unemployment rate
model.add_node(bd.Node("mu1", bd.Uniform(-10.0, 0.0)))
model.add_node(bd.Node("L1", bd.LogUniform(1.0, 1E4)))
model.add_node(bd.Node("beta1", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha1", bd.Delta("exp(-1.0/L1)")))
model.add_node(bd.Node("sigma1", bd.Delta("beta1/sqrt(1.0 - alpha1*alpha1)")))
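# (For reference: for an AR(1) process x_t = mu + alpha*(x_{t-1} - mu) + beta*eps_t,
# the stationary standard deviation is beta / sqrt(1 - alpha^2), which is exactly
# how sigma1 is derived from beta1 and alpha1 above.)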
# Sampling distribution for adult data
model.add_node(bd.Node("adult0",\
bd.Normal("mu1", "sigma1"), observed=True))
for i in range(1, data["N"]):
name = "adult{i}".format(i=i)
dist = bd.Normal("mu1 + alpha1*(adult{k} - mu1)".format(k=(i-1)), "beta1")
model.add_node(bd.Node(name, dist, observed=True))
# Parameters relating to youth data
model.add_node(bd.Node("offset", bd.Normal(0.0, 1.0)))
model.add_node(bd.Node("policy_effect", bd.Cauchy(0.0, 0.1)))
model.add_node(bd.Node("L2", bd.LogUniform(1E-2, 1E2)))
model.add_node(bd.Node("beta2", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha2", bd.Delta("exp(-1.0/L2)")))
model.add_node(bd.Node("sigma2", bd.Delta("beta2/sqrt(1.0 - alpha2*alpha2)")))
for i in range(0, data["N"]):
name = "youth{i}".format(i=i)
mean = "adult{i} + offset + ((t{i} >= 90.0)?(policy_effect):(0.0))"\
.format(i=i)
sd = "sigma2"
if i > 0:
ar1_mean = "{mean} + alpha2*(youth{k} - ({mean}))".format(mean=mean,\
k=(i-1))
mean = ar1_mean
sd = "beta2"
dist = bd.Normal(mean, sd)
model.add_node(bd.Node(name, dist, observed=True))
# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)
# Compile the C++ code so it's ready to go
import os
os.system("make")
| mit |
enclose-io/compiler | current/deps/v8/tools/ignition/bytecode_dispatches_report.py | 14 | 9248 | #! /usr/bin/python
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import heapq
import json
from matplotlib import colors
from matplotlib import pyplot
import numpy
import struct
import sys
__DESCRIPTION = """
Process v8.ignition_dispatches_counters.json and list top counters,
or plot a dispatch heatmap.
Please note that those handlers that may not or will never dispatch
(e.g. Return or Throw) do not show up in the results.
"""
__HELP_EPILOGUE = """
examples:
# Print the hottest bytecodes in descending order, reading from
# default filename v8.ignition_dispatches_counters.json (default mode)
$ tools/ignition/bytecode_dispatches_report.py
# Print the hottest 15 bytecode dispatch pairs reading from data.json
$ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json
# Save heatmap to default filename v8.ignition_dispatches_counters.svg
$ tools/ignition/bytecode_dispatches_report.py -p
# Save heatmap to filename data.svg
$ tools/ignition/bytecode_dispatches_report.py -p -o data.svg
# Open the heatmap in an interactive viewer
$ tools/ignition/bytecode_dispatches_report.py -p -i
# Display the top 5 sources and destinations of dispatches to/from LdaZero
$ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5
"""
__COUNTER_BITS = struct.calcsize("P") * 8 # Size in bits of a pointer
__COUNTER_MAX = 2**__COUNTER_BITS - 1
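# The counters JSON is a nested mapping from source handler to destination
# handler to dispatch count, e.g. (illustrative values only):
#   {"LdaZero": {"Star": 3000}, "Star": {"LdaZero": 2800, "Star": 150}}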
def warn_if_counter_may_have_saturated(dispatches_table):
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
if counter == __COUNTER_MAX:
print("WARNING: {} -> {} may have saturated.".format(source,
destination))
def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
def flattened_counters_generator():
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
yield source, destination, counter
return heapq.nlargest(top_count, flattened_counters_generator(),
key=lambda x: x[2])
def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
top_bytecode_dispatch_pairs = (
find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
print("Top {} bytecode dispatch pairs:".format(top_count))
for source, destination, counter in top_bytecode_dispatch_pairs:
print("{:>12d}\t{} -> {}".format(counter, source, destination))
def find_top_bytecodes(dispatches_table):
top_bytecodes = []
for bytecode, counters_from_bytecode in iteritems(dispatches_table):
top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode))))
top_bytecodes.sort(key=lambda x: x[1], reverse=True)
return top_bytecodes
def print_top_bytecodes(dispatches_table):
top_bytecodes = find_top_bytecodes(dispatches_table)
print("Top bytecodes:")
for bytecode, counter in top_bytecodes:
print("{:>12d}\t{}".format(counter, bytecode))
def find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_source_relative):
sources = []
for source, destinations in iteritems(dispatches_table):
total = float(sum(itervalues(destinations)))
if bytecode in destinations:
count = destinations[bytecode]
sources.append((source, count, count / total))
destinations = []
bytecode_destinations = dispatches_table[bytecode]
bytecode_total = float(sum(itervalues(bytecode_destinations)))
for destination, count in iteritems(bytecode_destinations):
destinations.append((destination, count, count / bytecode_total))
return (heapq.nlargest(top_count, sources,
key=lambda x: x[2 if sort_source_relative else 1]),
heapq.nlargest(top_count, destinations, key=lambda x: x[1]))
def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
top_count, sort_relative):
top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_relative)
print("Top sources of dispatches to {}:".format(bytecode))
for source_name, counter, ratio in top_sources:
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
print("\nTop destinations of dispatches from {}:".format(bytecode))
for destination_name, counter, ratio in top_destinations:
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
def build_counters_matrix(dispatches_table):
labels = sorted(dispatches_table.keys())
counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int)
for from_index, from_name in enumerate(labels):
    current_row = dispatches_table[from_name]
for to_index, to_name in enumerate(labels):
counters_matrix[from_index, to_index] = current_row.get(to_name, 0)
# Reverse y axis for a nicer appearance
xlabels = labels
ylabels = list(reversed(xlabels))
counters_matrix = numpy.flipud(counters_matrix)
return counters_matrix, xlabels, ylabels
def plot_dispatches_table(dispatches_table, figure, axis):
counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table)
image = axis.pcolor(
counters_matrix,
cmap="jet",
norm=colors.LogNorm(),
edgecolor="grey",
linestyle="dotted",
linewidth=0.5
)
axis.xaxis.set(
ticks=numpy.arange(0.5, len(xlabels)),
label="From bytecode handler"
)
axis.xaxis.tick_top()
axis.set_xlim(0, len(xlabels))
axis.set_xticklabels(xlabels, rotation="vertical")
axis.yaxis.set(
ticks=numpy.arange(0.5, len(ylabels)),
label="To bytecode handler",
ticklabels=ylabels
)
axis.set_ylim(0, len(ylabels))
figure.colorbar(
image,
ax=axis,
fraction=0.01,
pad=0.01
)
def parse_command_line():
command_line_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__DESCRIPTION,
epilog=__HELP_EPILOGUE
)
command_line_parser.add_argument(
"--plot-size", "-s",
metavar="N",
default=30,
help="shorter side in inches of the output plot (default 30)"
)
command_line_parser.add_argument(
"--plot", "-p",
action="store_true",
help="plot dispatch pairs heatmap"
)
command_line_parser.add_argument(
"--interactive", "-i",
action="store_true",
help="open the heatmap in an interactive viewer, instead of writing to file"
)
command_line_parser.add_argument(
"--top-bytecode-dispatch-pairs", "-t",
action="store_true",
help="print the top bytecode dispatch pairs"
)
command_line_parser.add_argument(
"--top-entries-count", "-n",
metavar="N",
type=int,
default=10,
help="print N top entries when running with -t or -f (default 10)"
)
command_line_parser.add_argument(
"--top-dispatches-for-bytecode", "-f",
metavar="<bytecode name>",
help="print top dispatch sources and destinations to the specified bytecode"
)
command_line_parser.add_argument(
"--output-filename", "-o",
metavar="<output filename>",
default="v8.ignition_dispatches_table.svg",
help=("file to save the plot file to. File type is deduced from the "
"extension. PDF, SVG, PNG supported")
)
command_line_parser.add_argument(
"--sort-sources-relative", "-r",
action="store_true",
help=("print top sources in order to how often they dispatch to the "
"specified bytecode, only applied when using -f")
)
command_line_parser.add_argument(
"input_filename",
metavar="<input filename>",
default="v8.ignition_dispatches_table.json",
nargs='?',
help="Ignition counters JSON file"
)
return command_line_parser.parse_args()
def itervalues(d):
return d.values() if sys.version_info[0] > 2 else d.itervalues()
def iteritems(d):
return d.items() if sys.version_info[0] > 2 else d.iteritems()
def main():
program_options = parse_command_line()
with open(program_options.input_filename) as stream:
dispatches_table = json.load(stream)
warn_if_counter_may_have_saturated(dispatches_table)
if program_options.plot:
figure, axis = pyplot.subplots()
plot_dispatches_table(dispatches_table, figure, axis)
if program_options.interactive:
pyplot.show()
else:
figure.set_size_inches(program_options.plot_size,
program_options.plot_size)
pyplot.savefig(program_options.output_filename)
elif program_options.top_bytecode_dispatch_pairs:
print_top_bytecode_dispatch_pairs(
dispatches_table, program_options.top_entries_count)
elif program_options.top_dispatches_for_bytecode:
print_top_dispatch_sources_and_destinations(
dispatches_table, program_options.top_dispatches_for_bytecode,
program_options.top_entries_count, program_options.sort_sources_relative)
else:
print_top_bytecodes(dispatches_table)
if __name__ == "__main__":
main()
| mit |
griffint/SoftwareSystems | hw04/wave3/generate_sine.py | 23 | 2124 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import thinkdsp
import thinkplot
import matplotlib.pyplot as pyplot
def print_reverse_tables():
print 'int reverse1[] = {'
for i in range(64):
s = bin(i)[2:].zfill(6)
print '%d,' % int(s[::-1], 2),
print '};'
print 'int reverse2[] = {'
for i in [0, 2, 1, 3]:
print '%d,' % (i * 64),
print '};'
def print_c_wave(wave, name='wave1'):
print 'int %s[] = {' % name
ys = 0.7 + (4.3) * (wave.ys + 1) / 2
zs = [int(y * 256 / 5) for y in ys]
t = [str(z) for z in zs]
print ','.join(t)
print '};'
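# Note on the scaling in print_c_wave: samples in [-1, 1] are mapped to an
# approximately 0.7 V to 5.0 V range and then quantized to 8-bit DAC codes via
# int(y * 256 / 5), i.e. assuming a 5 V full scale.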
def make_sine():
framerate = 8000
signal = thinkdsp.SinSignal(440, amp=1.0, offset=0)
duration = signal.period
wave = signal.make_wave(duration=duration, start=0, framerate=framerate)
print 'Number of samples', len(wave.ys)
print 'Timestep in ms', 1.0 / framerate * 1000
# plot the segment
#wave.plot()
#thinkplot.Show(
# xlabel='time (s)',
# axis=[0, duration, -1.05, 1.05])
print_c_wave(wave)
def file_example(start=0.1, duration=0.6):
"""Demonstrates methods in the thinkdsp module.
"""
# read the file recording
wave = thinkdsp.read_wave('51743__erkanozan__applause.wav')
# extract a segment
segment = wave.segment(start, duration)
# make the spectrum
spectrum = segment.make_spectrum()
# apply a filter
spectrum.low_pass(600)
# invert the spectrum
filtered = spectrum.make_wave()
# prepare the original and filtered segments
filtered.normalize()
filtered.apodize()
segment.apodize()
# write the original and filtered segments to a file
filename = 'filtered.wav'
wfile = thinkdsp.WavFileWriter(filename, segment.framerate)
wfile.write(segment)
wfile.write(filtered)
wfile.close()
thinkdsp.play_wave(filename)
def main():
make_sine()
# file_example()
if __name__ == '__main__':
main()
| gpl-3.0 |
petosegan/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
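# For reference, in the Gaussian case the function above evaluates
#   p(y) = norm * sum_i exp(-0.5 * ||y - x_i||^2 / h^2)
# with norm = kernel_norm(h, n_features, kernel) / n_samples, matching the
# normalization of KernelDensity so that exp(score_samples(Y)) can be compared
# against it directly.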
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
cbertinato/pandas | pandas/tests/plotting/test_converter.py | 1 | 12404 | from datetime import date, datetime
import subprocess
import sys
import numpy as np
import pytest
import pandas._config.config as cf
from pandas.compat.numpy import np_datetime64_compat
from pandas import Index, Period, Series, Timestamp, date_range
import pandas.util.testing as tm
from pandas.plotting import (
deregister_matplotlib_converters, register_matplotlib_converters)
from pandas.tseries.offsets import Day, Micro, Milli, Second
try:
from pandas.plotting._matplotlib import converter
except ImportError:
# try / except, rather than skip, to avoid internal refactoring
    # causing an improper skip
pass
pytest.importorskip('matplotlib.pyplot')
def test_initial_warning():
code = (
"import pandas as pd; import matplotlib.pyplot as plt; "
"s = pd.Series(1, pd.date_range('2000', periods=12)); "
"fig, ax = plt.subplots(); "
"ax.plot(s.index, s.values)"
)
call = [sys.executable, '-c', code]
out = subprocess.check_output(call, stderr=subprocess.STDOUT).decode()
assert 'Using an implicitly' in out
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num("00:01"))
class TestRegistration:
def test_register_by_default(self):
# Run in subprocess to ensure a clean state
        code = ("import matplotlib.units; "
                "import pandas as pd; "
                "units = dict(matplotlib.units.registry); "
                "assert pd.Timestamp in units")
call = [sys.executable, '-c', code]
assert subprocess.check_call(call) == 0
def test_warns(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
# Set to the "warning" state, in case this isn't the first test run
converter._WARN = True
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
ax.plot(s.index, s.values)
plt.close()
assert len(w) == 1
assert "Using an implicitly registered datetime converter" in str(w[0])
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
# Set to the "warn" state, in case this isn't the first test run
converter._WARN = True
register_matplotlib_converters()
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
# Set to the "warn" state, in case this isn't the first test run
converter._WARN = True
with tm.assert_produces_warning(None) as w:
s.plot()
assert len(w) == 0
def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")
assert Timestamp in units.registry
ctx = cf.option_context("plotting.matplotlib.register_converters",
False)
with ctx:
assert Timestamp not in units.registry
assert Timestamp in units.registry
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters",
False)
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
converter._WARN = True
# Test without registering first, no warning
with ctx:
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
# Now test with registering
converter._WARN = True
register_matplotlib_converters()
with ctx:
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
dates = pytest.importorskip("matplotlib.dates")
# make a copy, to reset to
original = dict(units.registry)
try:
# get to a known state
units.registry.clear()
date_converter = dates.DateConverter()
units.registry[datetime] = date_converter
units.registry[date] = date_converter
register_matplotlib_converters()
assert units.registry[date] is not date_converter
deregister_matplotlib_converters()
assert units.registry[date] is date_converter
finally:
            # restore original state
units.registry.clear()
for k, v in original.items():
units.registry[k] = v
def test_old_import_warns(self):
with tm.assert_produces_warning(FutureWarning) as w:
from pandas.tseries import converter
converter.register()
assert len(w)
assert ('pandas.plotting.register_matplotlib_converters' in
str(w[0].message))
class TestDateTimeConverter:
def setup_method(self, method):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert("12:22", None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(date(2012, 1, 1), None, None)
assert rs == xp
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
assert rs == xp
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np_datetime64_compat('2012-01-01'), None, None)
assert rs == xp
rs = self.dtc.convert(np_datetime64_compat(
'2012-01-01 00:00:00+0000'), None, None)
assert rs == xp
rs = self.dtc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]), None, None)
assert rs[0] == xp
# we have a tz-aware date (constructed to that when we turn to utc it
# is the same as our sample)
ts = (Timestamp('2012-01-01')
.tz_localize('UTC')
.tz_convert('US/Eastern')
)
rs = self.dtc.convert(ts, None, None)
assert rs == xp
rs = self.dtc.convert(ts.to_pydatetime(), None, None)
assert rs == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]), None, None)
assert rs[1] == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(),
None, None)
assert rs[1] == xp
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, decimals)
def test_conversion_outofbounds_datetime(self):
# 2579
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
@pytest.mark.parametrize('time,format_expected', [
(0, '00:00'), # time2num(datetime.time.min)
(86399.999999, '23:59:59.999999'), # time2num(datetime.time.max)
(90000, '01:00'),
(3723, '01:02:03'),
(39723.2, '11:02:03.200')
])
def test_time_formatter(self, time, format_expected):
# issue 18478
result = self.tc(time)
assert result == format_expected
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
def test_convert_nested(self):
inner = [Timestamp('2017-01-01'), Timestamp('2017-01-02')]
data = [inner, inner]
result = self.dtc.convert(data, None, None)
expected = [self.dtc.convert(x, None, None) for x in data]
assert (np.array(result) == expected).all()
class TestPeriodConverter:
def setup_method(self, method):
self.pc = converter.PeriodConverter()
class Axis:
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert("2012-1-1", None, self.axis)
assert r1 == r2
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
assert rs == xp
rs = self.pc.convert('2012-1-1', None, self.axis)
assert rs == xp
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
assert rs == xp
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
assert rs == xp
rs = self.pc.convert(
np_datetime64_compat('2012-01-01'), None, self.axis)
assert rs == xp
rs = self.pc.convert(
np_datetime64_compat('2012-01-01 00:00:00+0000'), None, self.axis)
assert rs == xp
rs = self.pc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]),
None, self.axis)
assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
assert rs == xp
def test_convert_nested(self):
data = ['2012-1-1', '2012-1-2']
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
| bsd-3-clause |
xzh86/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
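# For each candidate alpha, record the mean cross-validation score and its
# spread across folds.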
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
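# Outer 3-fold CV: LassoCV picks alpha by internal CV on each training fold,
# which shows how stable that choice is across folds.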
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
profxj/old_xastropy | xastropy/xguis/img_widgets.py | 5 | 58283 | """
#;+
#; NAME:
#; spec_widgets
#; Version 1.0
#;
#; PURPOSE:
#; Module for IMG widgets with QT
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys, imp
import matplotlib.pyplot as plt
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
# astropy
from astropy.table.table import Table
from astropy import constants as const
from astropy import units as u
# xastropy
from xastropy.xutils import xdebug as xdb
from xastropy.plotting import utils as xputils
from xastropy.xguis import utils as xguiu
xa_path = imp.find_module('xastropy')[1]
# class ExamineSpecWidget
# class PlotLinesWidget
class ExamineImgWidget(QtGui.QWidget):
''' Widget to show an image and fiddle about
12-Apr-2015 by JXP
'''
import xastropy.xutils as xxu
from xastropy import stats as xstats
def __init__(self, ispec, parent=None, status=None, llist=None,
abs_sys=None, norm=True, second_file=None, zsys=None):
'''
        ispec = input spectrum (filename or Spectrum1D), parsed via read_spec
'''
super(ExamineImgWidget, self).__init__(parent)
# Image
spec, spec_fil = read_spec(ispec, second_file=second_file)
self.orig_spec = spec # For smoothing
self.spec = self.orig_spec
# Abs Systems
if abs_sys is None:
self.abs_sys = []
else:
self.abs_sys = abs_sys
self.norm = norm
self.psdict = {} # Dict for spectra plotting
self.adict = {} # Dict for analysis
self.init_spec()
self.xval = None # Used with velplt
# Status Bar?
if not status is None:
self.statusBar = status
# Line List?
if llist is None:
self.llist = {'Plot': False, 'List': 'None', 'z': 0.}
else:
self.llist = llist
# zsys
if not zsys is None:
self.llist['z'] = zsys
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 150 # 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Make two plots
self.ax = self.fig.add_subplot(1,1,1)
self.fig.subplots_adjust(hspace=0.1, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
#
# Draw on init
self.on_draw()
# Setup the spectrum plotting info
def init_spec(self):
#xy min/max
xmin = np.min(self.spec.dispersion).value
xmax = np.max(self.spec.dispersion).value
ymed = np.median(self.spec.flux).value
ymin = 0. - 0.1*ymed
ymax = ymed * 1.5
#
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.psdict['xmnx'] = np.array([xmin,xmax])
self.psdict['ymnx'] = [ymin,ymax]
self.psdict['sv_xy'] = [ [xmin,xmax], [ymin,ymax] ]
self.psdict['nav'] = navigate(0,0,init=True)
# Analysis dict
self.adict['flg'] = 0 # Column density flag
# Main Driver
def on_key(self,event):
flg = -1
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
## DOUBLETS
if event.key in ['C','M','O','8','B']: # Set left
wave = set_doublet(self, event)
#print('wave = {:g},{:g}'.format(wave[0], wave[1]))
self.ax.plot( [wave[0],wave[0]], self.psdict['ymnx'], '--', color='red')
self.ax.plot( [wave[1],wave[1]], self.psdict['ymnx'], '--', color='red')
flg = 2 # Layer
## SMOOTH
if event.key == 'S':
self.spec = self.spec.box_smooth(2)
flg = 1
if event.key == 'U':
self.spec = self.orig_spec
flg = 1
## ANALYSIS: EW, AODM column density
if event.key in ['N', 'E', '$']:
# If column check for line list
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if (event.key in ['N','E']) & (self.llist['List'] == 'None'):
print('xspec: Choose a Line list first!')
try:
self.statusBar().showMessage('Choose a Line list first!')
except AttributeError:
pass
self.adict['flg'] = 0
return
flg = 1
if self.adict['flg'] == 0:
self.adict['wv_1'] = event.xdata # wavelength
self.adict['C_1'] = event.ydata # continuum
self.adict['flg'] = 1
else:
self.adict['wv_2'] = event.xdata # wavelength
self.adict['C_2'] = event.ydata # continuum
self.adict['flg'] = 2 # Ready to plot + print
# Sort em + make arrays
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
iwv = np.array(sorted([self.adict['wv_1'], self.adict['wv_2']])) * self.spec.wcs.unit
ic = np.array(sorted([self.adict['C_1'], self.adict['C_2']]))
# Calculate the continuum (linear fit)
param = np.polyfit(iwv, ic, 1)
cfunc = np.poly1d(param)
conti = cfunc(self.spec.dispersion)
if event.key == '$': # Simple stats
pix = self.spec.pix_minmax(iwv)[0]
mean = np.mean(self.spec.flux[pix])
median = np.median(self.spec.flux[pix])
stdv = np.std(self.spec.flux[pix]-conti[pix])
S2N = median / stdv
mssg = 'Mean={:g}, Median={:g}, S/N={:g}'.format(mean,median,S2N)
else:
# Find the spectral line (or request it!)
rng_wrest = iwv / (self.llist['z']+1)
gdl = np.where( (self.llist[self.llist['List']]['wrest']-rng_wrest[0]) *
(self.llist[self.llist['List']]['wrest']-rng_wrest[1]) < 0.)[0]
if len(gdl) == 1:
wrest = self.llist[self.llist['List']]['wrest'][gdl[0]]
else:
if len(gdl) == 0: # Search through them all
gdl = np.arange(len(self.llist[self.llist['List']]))
sel_widg = SelectLineWidget(self.llist[self.llist['List']][gdl])
sel_widg.exec_()
line = sel_widg.line
wrest = float(line.split('::')[1].lstrip())
# Generate the Spectral Line
from xastropy.spec.lines_utils import AbsLine
aline = AbsLine(wrest)
aline.analy['z'] = self.llist['z']
aline.spec = self.spec
# AODM
if event.key == 'N':
# Calculate the velocity limits and load-up
aline.analy['VLIM'] = const.c.to('km/s') * (
( iwv/(1+self.llist['z']) - wrest) / wrest )
# AODM
aline.aodm(conti=conti)
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: logN = {:g} +/- {:g}'.format(aline.attrib['logN'],
aline.attrib['sig_logN'])
elif event.key == 'E': #EW
aline.analy['WVMNX'] = iwv
aline.restew(conti=conti)
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: EW = {:g} +/- {:g}'.format(aline.attrib['EW'].to('mAA'),
aline.attrib['sigEW'].to('mAA'))
# Display values
try:
self.statusBar().showMessage(mssg)
except AttributeError:
pass
print(mssg)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
## Velocity plot
if event.key == 'v':
flg = 0
from xastropy.xguis import spec_guis as xsgui
z=self.llist['z']
# Check for a match in existing list and use it if so
if len(self.abs_sys) > 0:
zabs = np.array([abs_sys.zabs for abs_sys in self.abs_sys])
mt = np.where( np.abs(zabs-z) < 1e-4)[0]
else:
mt = []
if len(mt) == 1:
ini_abs_sys = self.abs_sys[mt[0]]
outfil = ini_abs_sys.absid_file
self.vplt_flg = 0 # Old one
print('Using existing ID file {:s}'.format(outfil))
else:
ini_abs_sys = None
outfil = None
self.vplt_flg = 1 # New one
# Outfil
if outfil is None:
i0 = self.spec.filename.rfind('/')
i1 = self.spec.filename.rfind('.')
if i0 < 0:
path = './ID_LINES/'
else:
path = self.spec.filename[0:i0]+'/ID_LINES/'
outfil = path + self.spec.filename[i0+1:i1]+'_z'+'{:.4f}'.format(z)+'_id.fits'
xxu.files.ensure_dir(outfil)
self.outfil = outfil
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = xsgui.XVelPltGui(self.spec, z=z, outfil=outfil, llist=self.llist,
abs_sys=ini_abs_sys, norm=self.norm, sel_wv=self.xval)
gui.exec_()
if gui.flg_quit == 0: # Quit without saving (i.e. discarded)
self.vplt_flg = 0
else:
# Push to Abs_Sys
if len(mt) == 1:
self.abs_sys[mt[0]] = gui.abs_sys
else:
self.abs_sys.append(gui.abs_sys)
print('Adding new abs system')
# Redraw
flg=1
# Dummy keys
if event.key in ['shift', 'control']:
flg = 0
# Draw
if flg==1: # Default is not to redraw
self.on_draw()
elif flg==2: # Layer (no clear)
self.on_draw(replot=False)
elif flg==-1: # Layer (no clear)
try:
self.statusBar().showMessage('Not a valid key! {:s}'.format(event.key))
except AttributeError:
pass
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
print('Out of bounds')
return
if event.button == 1: # Draw line
self.xval = event.xdata
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
# ######
def on_draw(self, replot=True):
""" Redraws the spectrum
"""
#
if replot is True:
self.ax.clear()
self.ax.plot(self.spec.dispersion, self.spec.flux, 'k-',drawstyle='steps-mid')
self.ax.plot(self.spec.dispersion, self.spec.sig, 'r:')
#self.ax.plot(self.spec.dispersion, self.spec.flux, 'k-',drawstyle='steps-mid')
#self.ax.plot(self.spec.dispersion, self.spec.sig, 'r:')
self.ax.set_xlabel('Wavelength')
self.ax.set_ylabel('Flux')
# Spectral lines?
if self.llist['Plot'] is True:
ylbl = self.psdict['ymnx'][1]-0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
z = self.llist['z']
wvobs = np.array((1+z) * self.llist[self.llist['List']]['wrest'])
gdwv = np.where( (wvobs > self.psdict['xmnx'][0]) &
(wvobs < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
wrest = self.llist[self.llist['List']]['wrest'][jj]
lbl = self.llist[self.llist['List']]['name'][jj]
# Plot
self.ax.plot(wrest*np.array([z+1,z+1]), self.psdict['ymnx'], 'b--')
# Label
self.ax.text(wrest*(z+1), ylbl, lbl, color='blue', rotation=90., size='small')
# Abs Sys?
if not self.abs_sys is None:
ylbl = self.psdict['ymnx'][0]+0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
clrs = ['red', 'green', 'cyan', 'orange', 'gray', 'purple']*10
for abs_sys in self.abs_sys:
ii = self.abs_sys.index(abs_sys)
wrest = np.array(abs_sys.lines.keys())
wvobs = wrest * (abs_sys.zabs+1)
gdwv = np.where( ((wvobs+5) > self.psdict['xmnx'][0]) & # Buffer for region
((wvobs-5) < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
if abs_sys.lines[wrest[jj]].analy['FLG_ANLY'] == 0:
continue
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Paint spectrum red
wvlim = wvobs[jj]*(1 + abs_sys.lines[wrest[jj]].analy['VLIM']/3e5)
pix = np.where( (self.spec.dispersion > wvlim[0]) & (self.spec.dispersion < wvlim[1]))[0]
self.ax.plot(self.spec.dispersion[pix], self.spec.flux[pix], '-',drawstyle='steps-mid',
color=clrs[ii])
# Label
lbl = abs_sys.lines[wrest[jj]].analy['IONNM']+' z={:g}'.format(abs_sys.zabs)
self.ax.text(wvobs[jj], ylbl, lbl, color=clrs[ii], rotation=90., size='x-small')
# Analysis? EW, Column
if self.adict['flg'] == 1:
self.ax.plot(self.adict['wv_1'], self.adict['C_1'], 'go')
elif self.adict['flg'] == 2:
self.ax.plot([self.adict['wv_1'], self.adict['wv_2']],
[self.adict['C_1'], self.adict['C_2']], 'g--', marker='o')
self.adict['flg'] = 0
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
self.ax.set_ylim(self.psdict['ymnx'])
# Draw
self.canvas.draw()
# Notes on usage
def help_notes():
doublets = [ 'Doublets --------',
'C: CIV',
'M: MgII',
'O: OVI',
'8: NeVIII',
'B: Lyb/Lya'
]
analysis = [ 'Analysis --------',
'N/N: Column density (AODM)',
'E/E: EW (boxcar)',
'$/$: stats on spectrum'
                     ]
        return doublets + analysis
# #####
class PlotLinesWidget(QtGui.QWidget):
''' Widget to set up spectral lines for plotting
13-Dec-2014 by JXP
'''
def __init__(self, parent=None, status=None, init_llist=None, init_z=None):
'''
'''
super(PlotLinesWidget, self).__init__(parent)
# Initialize
if not status is None:
self.statusBar = status
if init_z is None:
init_z = 0.
# Create a dialog window for redshift
z_label = QtGui.QLabel('z=')
self.zbox = QtGui.QLineEdit()
self.zbox.z_frmt = '{:.7f}'
self.zbox.setText(self.zbox.z_frmt.format(init_z))
self.zbox.setMinimumWidth(50)
self.connect(self.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
# Create the line list
self.lists = ['None', 'grb.lst', 'dla.lst', 'lls.lst',
'lyman.lst', 'gal_vac.lst', 'ne8.lst',
'lowz_ovi.lst', 'casbah.lst']
list_label = QtGui.QLabel('Line Lists:')
self.llist_widget = QtGui.QListWidget(self)
for ilist in self.lists:
self.llist_widget.addItem(ilist)
self.llist_widget.setCurrentRow(0)
self.llist_widget.currentItemChanged.connect(self.on_list_change)
self.llist_widget.setMaximumHeight(100)
# Input line list?
if init_llist is None:
self.llist = {} # Dict for the line lists
self.llist['Plot'] = False
self.llist['z'] = 0.
self.llist['List'] = 'None'
else: # Fill it all up and select
self.llist = init_llist
if not init_llist['List'] in self.lists:
self.lists.append(init_llist['List'])
self.llist_widget.addItem(init_llist['List'])
self.llist_widget.setCurrentRow(len(self.lists)-1)
else:
idx = self.lists.index(init_llist['List'])
self.llist_widget.setCurrentRow(idx)
try:
self.zbox.setText(self.zbox.z_frmt.format(init_llist['z']))
except KeyError:
pass
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(z_label)
vbox.addWidget(self.zbox)
vbox.addWidget(list_label)
vbox.addWidget(self.llist_widget)
self.setLayout(vbox)
self.setMaximumHeight(200)
def on_list_change(self,curr,prev):
llist = str(curr.text())
# Print
try:
self.statusBar().showMessage('You chose: {:s}'.format(llist))
except AttributeError:
print('You chose: {:s}'.format(curr.text()))
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.llist = set_llist(llist,in_dict=self.llist)
# Try to draw
if self.llist['Plot'] is True:
try:
self.spec_widg.on_draw()
except AttributeError:
return
def setz(self):
sstr = unicode(self.zbox.text())
try:
self.llist['z'] = float(sstr)
except ValueError:
try:
self.statusBar().showMessage('ERROR: z Input must be a float! Try again..')
except AttributeError:
print('ERROR: z Input must be a float! Try again..')
self.zbox.setText(self.zbox.z_frmt.format(self.llist['z']))
return
# Report
try:
self.statusBar().showMessage('z = {:g}'.format(self.llist['z']))
except AttributeError:
print('z = {:g}'.format(self.llist['z']))
# Try to draw
try:
self.spec_widg.on_draw()
except AttributeError:
return
# #####
class SelectLineWidget(QtGui.QDialog):
''' Widget to select a spectral line
inp: string or dict or Table
Input line list
15-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None):
'''
'''
super(SelectLineWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,Table):
lines = inp
else:
raise ValueError('SelectLineWidget: Wrong type of input')
self.resize(250, 800)
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.addItem('None')
self.lines_widget.setCurrentRow(0)
#xdb.set_trace()
# Loop on lines (could put a preferred list first)
nlin = len(lines['wrest'])
for ii in range(nlin):
self.lines_widget.addItem('{:s} :: {:.4f}'.format(lines['name'][ii],
lines['wrest'][ii]))
self.lines_widget.currentItemChanged.connect(self.on_list_change)
#self.scrollArea = QtGui.QScrollArea()
# Quit
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.close)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
vbox.addWidget(qbtn)
self.setLayout(vbox)
def on_list_change(self,curr,prev):
self.line = str(curr.text())
# Print
print('You chose: {:s}'.format(curr.text()))
# #####
class SelectedLinesWidget(QtGui.QWidget):
''' Widget to show and enable lines to be selected
inp: Table or (future: string or dict)
Input line list
24-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None, init_select=None, plot_widget=None):
'''
'''
super(SelectedLinesWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,Table):
self.lines = inp
else:
raise ValueError('SelectLineWidget: Wrong type of input')
self.plot_widget = plot_widget
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# Initialize list
self.item_flg = 0
self.init_list()
# Initial selection
if init_select is None:
self.selected = [0]
else:
self.selected = init_select
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
# Events
#self.lines_widget.itemClicked.connect(self.on_list_change)
self.lines_widget.itemSelectionChanged.connect(self.on_item_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
self.setLayout(vbox)
def init_list(self):
nlin = len(self.lines['wrest'])
for ii in range(nlin):
self.lines_widget.addItem('{:s} :: {:.3f}'.format(self.lines['name'][ii],
self.lines['wrest'][ii]))
def on_item_change(self): #,item):
# For big changes
if self.item_flg == 1:
return
all_items = [self.lines_widget.item(ii) for ii in range(self.lines_widget.count())]
sel_items = self.lines_widget.selectedItems()
self.selected = [all_items.index(isel) for isel in sel_items]
self.selected.sort()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Update llist
try:
self.plot_widget.llist['show_line'] = self.selected
except AttributeError:
return
else:
self.plot_widget.on_draw()
def on_list_change(self,lines):
# Clear
self.item_flg = 1
self.lines_widget.clear()
# Initialize
self.lines = lines
self.init_list()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Set selected
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
self.item_flg = 0
# #####
class AbsSysWidget(QtGui.QWidget):
''' Widget to organize AbsSys along a given sightline
Parameters:
-----------
abssys_list: List
String list of abssys files
16-Dec-2014 by JXP
'''
def __init__(self, abssys_list, parent=None):
'''
'''
super(AbsSysWidget, self).__init__(parent)
#if not status is None:
# self.statusBar = status
self.abssys_list = abssys_list
# Create the line list
list_label = QtGui.QLabel('Abs Systems:')
self.abslist_widget = QtGui.QListWidget(self)
self.abslist_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.abslist_widget.addItem('None')
#self.abslist_widget.addItem('Test')
# Lists
self.abs_sys = []
self.items = []
self.all_items = []
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
self.add_item(abssys_fil)
self.abslist_widget.setCurrentRow(0)
self.abslist_widget.itemSelectionChanged.connect(self.on_list_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(list_label)
# Buttons
buttons = QtGui.QWidget()
self.refine_button = QtGui.QPushButton('Refine', self)
#self.refine_button.clicked.connect(self.refine) # CONNECTS TO A PARENT
reload_btn = QtGui.QPushButton('Reload', self)
reload_btn.clicked.connect(self.reload)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.refine_button)
hbox1.addWidget(reload_btn)
buttons.setLayout(hbox1)
vbox.addWidget(buttons)
vbox.addWidget(self.abslist_widget)
self.setLayout(vbox)
# ##
def on_list_change(self):
items = self.abslist_widget.selectedItems()
# Empty the list
#self.abs_sys = []
if len(self.abs_sys) > 0:
for ii in range(len(self.abs_sys)-1,-1,-1):
self.abs_sys.pop(ii)
# Load up abs_sys (as need be)
new_items = []
for item in items:
txt = item.text()
# Dummy
if txt == 'None':
continue
print('Including {:s} in the list'.format(txt))
# Using LLS for now. Might change to generic
new_items.append(txt)
ii = self.all_items.index(txt)
self.abs_sys.append(self.all_abssys[ii])
# Pass back
self.items = new_items
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
def add_fil(self,abssys_fil):
self.abssys_list.append( abssys_fil )
self.add_item(abssys_fil)
def add_item(self,abssys_fil):
ipos0 = abssys_fil.rfind('/') + 1
ipos1 = abssys_fil.rfind('.fits')
self.all_items.append( abssys_fil[ipos0:ipos1] )
self.abslist_widget.addItem(abssys_fil[ipos0:ipos1] )
def reload(self):
print('AbsSysWidget: Reloading systems..')
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
#self.add_item(abssys_fil)
self.on_list_change()
# ######################
class VelPlotWidget(QtGui.QWidget):
''' Widget for a velocity plot with interaction.
19-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.], abs_sys=None):
'''
spec = Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
'''
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
super(VelPlotWidget, self).__init__(parent)
# Initialize
spec, spec_fil = read_spec(ispec)
self.spec = spec
self.spec_fil = spec_fil
self.z = z
self.vmnx = vmnx
self.norm = norm
# Abs_System
self.abs_sys = abs_sys
if self.abs_sys is None:
self.abs_sys = xiaa.Generic_System(None)
self.abs_sys.zabs = self.z
else:
self.z = self.abs_sys.zabs
# Line list
if llist is None:
try:
lwrest = self.abs_sys.lines.keys()
except AttributeError:
lwrest = None
if not lwrest is None:
llist = set_llist(lwrest)
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
# Status Bar?
#if not status is None:
# self.statusBar = status
# Line List
if llist is None:
self.llist = set_llist('lls.lst')
else:
self.llist = llist
self.llist['z'] = self.z
# Indexing for line plotting
self.idx_line = 0
self.init_lines()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Sub_plots
self.sub_xy = [3,4]
self.fig.subplots_adjust(hspace=0.0, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Load them up for display
def init_lines(self):
wvmin = np.min(self.spec.dispersion)
wvmax = np.max(self.spec.dispersion)
#
wrest = u.Quantity(self.llist[self.llist['List']]['wrest'])
wvobs = (1+self.z) * wrest
gdlin = np.where( (wvobs > wvmin) & (wvobs < wvmax) )[0]
self.llist['show_line'] = gdlin
existing_lines = self.abs_sys.lines.keys()
# Update/generate lines
for idx in gdlin:
# Generate?
kwrest = wrest[idx].value
if not kwrest in existing_lines:
self.abs_sys.lines[kwrest] = xspec.analysis.Spectral_Line(kwrest)
print('VelPlot: Generating line {:g}'.format(kwrest))
self.abs_sys.lines[kwrest].analy['VLIM'] = np.array([self.vmnx[0]/2.,
self.vmnx[1]/2.])
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = 2 # Init to ok
# Spec file
if not self.spec_fil is None:
self.abs_sys.lines[kwrest].analy['DATFIL'] = self.spec_fil
# Key stroke
def on_key(self,event):
# Init
rescale = True
fig_clear = False
wrest = None
flg = 0
sv_idx = self.idx_line
## Change rows/columns
if event.key == 'k':
self.sub_xy[0] = max(0, self.sub_xy[0]-1)
if event.key == 'K':
self.sub_xy[0] = self.sub_xy[0]+1
if event.key == 'c':
self.sub_xy[1] = max(0, self.sub_xy[1]-1)
if event.key == 'C':
self.sub_xy[1] = max(0, self.sub_xy[1]+1)
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key == '-':
self.idx_line = max(0, self.idx_line-self.sub_xy[0]*self.sub_xy[1]) # Min=0
if self.idx_line == sv_idx:
print('Edge of list')
if event.key == '=':
self.idx_line = min(len(self.llist['show_line'])-self.sub_xy[0]*self.sub_xy[1],
self.idx_line + self.sub_xy[0]*self.sub_xy[1])
if self.idx_line == sv_idx:
print('Edge of list')
## Reset z
if event.key == 'z':
from astropy.relativity import velocities
newz = velocities.z_from_v(self.z, event.xdata)
self.z = newz
self.abs_sys.zabs = newz
# Drawing
self.psdict['xmnx'] = self.vmnx
# Single line command
if event.key in ['1','2','B','U','L','N','V','A', 'x', 'X']:
try:
wrest = event.inaxes.get_gid()
except AttributeError:
return
else:
kwrest = wrest.value
## Velocity limits
if event.key == '1':
self.abs_sys.lines[kwrest].analy['VLIM'][0] = event.xdata
if event.key == '2':
self.abs_sys.lines[kwrest].analy['VLIM'][1] = event.xdata
if event.key == '!':
for key in self.abs_sys.lines.keys():
try:
self.abs_sys.lines[key].analy['VLIM'][0] = event.xdata
except KeyError:
print('Not setting VLIM for {:g}'.format(key))
if event.key == '@':
for key in self.abs_sys.lines.keys():
try:
self.abs_sys.lines[key].analy['VLIM'][1] = event.xdata
except KeyError:
print('Not setting VLIM for {:g}'.format(key))
## Line type
if event.key == 'A': # Add to lines
if not kwrest in self.abs_sys.lines.keys():
self.abs_sys.lines[kwrest] = xspec.analysis.Spectral_Line(wrest)
print('VelPlot: Generating line {:g}'.format(kwrest))
self.abs_sys.lines[kwrest].analy['VLIM'] = np.array([self.vmnx[0]/2.,
self.vmnx[1]/2.])
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = 2 # Init to ok
self.abs_sys.lines[kwrest].analy['DATFIL'] = self.spec_fil
if event.key == 'x': # Remove line
if kwrest in self.abs_sys.lines.keys():
self.abs_sys.lines.pop(kwrest)
print('VelPlot: Removed line {:g}'.format(wrest))
if event.key == 'X': # Remove all lines (might add warning widget)
# Double check
gui = xguiu.WarningWidg('About to remove all lines. \n Continue??')
gui.exec_()
if gui.ans is False:
return
#
                for kwrest in list(self.abs_sys.lines.keys()):
                    self.abs_sys.lines.pop(kwrest)
                    print('VelPlot: Removed line {:g}'.format(kwrest))
if event.key == 'B': # Toggle blend
try:
feye = self.abs_sys.lines[kwrest].analy['FLG_EYE']
except KeyError:
feye = 0
feye = (feye + 1) % 2
self.abs_sys.lines[kwrest].analy['FLG_EYE'] = feye
if event.key == 'N': # Toggle NG
try:
fanly = self.abs_sys.lines[kwrest].analy['FLG_ANLY']
except KeyError:
fanly = 2
if fanly == 0:
fanly = 2 # Not using 1 anymore..
else:
fanly = 0
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = fanly
if event.key == 'V': # Normal
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 1
if event.key == 'L': # Lower limit
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 2
if event.key == 'U': # Upper limit
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 3
# AODM plot
if event.key == ':': #
# Grab good lines
from xastropy.xguis import spec_guis as xsgui
gdl = []
for iwr in self.abs_sys.lines.keys():
if self.abs_sys.lines[iwr].analy['FLG_ANLY'] > 0:
gdl.append(iwr)
# Launch AODM
gui = xsgui.XAODMGui(self.spec, self.z, gdl, vmnx=self.vmnx, norm=self.norm)
gui.exec_()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if not wrest is None: # Single window
flg = 3
if event.key in ['c','C','k','K','W','!', '@', '=', '-', 'X', 'z','R']: # Redraw all
flg = 1
if event.key in ['Y']:
rescale = False
if event.key in ['k','c','C','K', 'R']:
fig_clear = True
if flg==1: # Default is not to redraw
self.on_draw(rescale=rescale, fig_clear=fig_clear)
elif flg==2: # Layer (no clear)
self.on_draw(replot=False, rescale=rescale)
elif flg==3: # Layer (no clear)
self.on_draw(in_wrest=wrest, rescale=rescale)
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, replot=True, in_wrest=None, rescale=True, fig_clear=False):
""" Redraws the figure
"""
#
if replot is True:
if fig_clear:
self.fig.clf()
# Loop on windows
all_idx = self.llist['show_line']
nplt = self.sub_xy[0]*self.sub_xy[1]
if len(all_idx) <= nplt:
self.idx_line = 0
subp = np.arange(nplt) + 1
subp_idx = np.hstack(subp.reshape(self.sub_xy[0],self.sub_xy[1]).T)
for jj in range(min(nplt, len(all_idx))):
try:
idx = all_idx[jj+self.idx_line]
except IndexError:
continue # Likely too few lines
# Grab line
#wvobs = np.array((1+self.z) * self.llist[self.llist['List']]['wrest'][idx])
wrest = (self.llist[self.llist['List']]['wrest'][idx] *
self.llist[self.llist['List']]['wrest'].unit)
kwrest = wrest.value # For the Dict
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Single window?
if not in_wrest is None:
if np.abs(wrest-in_wrest) > (1e-3*u.AA):
continue
# Generate plot
self.ax = self.fig.add_subplot(self.sub_xy[0],self.sub_xy[1], subp_idx[jj])
self.ax.clear()
#print('Plotting {:g}, {:d}'.format(wrest,subp_idx[jj]))
# Zero line
self.ax.plot( [0., 0.], [-1e9, 1e9], ':', color='gray')
# Velocity
wvobs = (1+self.z) * wrest
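            # velocity offset from the line centre (non-relativistic): v ~ c * (lambda/lambda_obs - 1), in km/s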
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
# Plot
self.ax.plot(velo, self.spec.flux, 'k-',drawstyle='steps-mid')
# GID for referencing
self.ax.set_gid(wrest)
# Labels
#if jj >= (self.sub_xy[0]-1)*(self.sub_xy[1]):
if ((jj+1) % self.sub_xy[0]) == 0:
self.ax.set_xlabel('Relative Velocity (km/s)')
else:
self.ax.get_xaxis().set_ticks([])
#if ((jj+1) // 2 == 0) & (jj < self.sub_xy[0]):
# self.ax.set_ylabel('Relative Flux')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
lbl = self.llist[self.llist['List']]['name'][idx]
self.ax.text(0.1, 0.05, lbl, color='blue', transform=self.ax.transAxes,
size='x-small', ha='left')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
# Rescale?
if (rescale is True) & (self.norm is False):
gdp = np.where( (velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
if len(gdp) > 5:
per = xstats.basic.perc(self.spec.flux[gdp])
self.ax.set_ylim((0., 1.1*per[1]))
else:
self.ax.set_ylim(self.psdict['ymnx'])
else:
self.ax.set_ylim(self.psdict['ymnx'])
# Fonts
xputils.set_fontsize(self.ax,6.)
# Abs_Sys: Color the lines
if not self.abs_sys is None:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
try:
vlim = self.abs_sys.lines[kwrest].analy['VLIM']
except KeyError:
continue
# Color coding
clr = 'black'
try: # .clm style
flag = self.abs_sys.lines[kwrest].analy['FLAGS'][0]
except KeyError:
flag = None
else:
if flag <= 1: # Standard detection
clr = 'green'
elif flag in [2,3]:
clr = 'blue'
elif flag in [4,5]:
clr = 'purple'
# ABS ID
try: # NG?
flagA = self.abs_sys.lines[kwrest].analy['FLG_ANLY']
except KeyError:
flagA = None
else:
if (flagA>0) & (clr == 'black'):
clr = 'green'
try: # Limit?
flagL = self.abs_sys.lines[kwrest].analy['FLG_LIMIT']
except KeyError:
flagL = None
else:
if flagL == 2:
clr = 'blue'
if flagL == 3:
clr = 'purple'
try: # Blends?
flagE = self.abs_sys.lines[kwrest].analy['FLG_EYE']
except KeyError:
flagE = None
else:
if flagE == 1:
clr = 'orange'
if flagA == 0:
clr = 'red'
pix = np.where( (velo > vlim[0]) & (velo < vlim[1]))[0]
self.ax.plot(velo[pix], self.spec.flux[pix], '-',
drawstyle='steps-mid', color=clr)
# Draw
self.canvas.draw()
# ######################
class AODMWidget(QtGui.QWidget):
''' Widget for comparing tau_AODM profiles
19-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, parent=None, vmnx=[-300., 300.],
norm=True):
'''
spec = Spectrum1D
'''
super(AODMWidget, self).__init__(parent)
# Initialize
self.spec = spec
self.norm = norm
self.z = z
self.vmnx = vmnx
self.wrest = wrest
self.lines = []
for iwrest in self.wrest:
self.lines.append(xspec.analysis.Spectral_Line(iwrest))
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Key stroke
def on_key(self,event):
# Init
rescale = True
flg = 0
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key in ['b','t','W','Z','Y','l','r']:
rescale = False
self.on_draw(rescale=rescale)
# Click of main mouse button
def on_click(self,event):
return # DO NOTHING FOR NOW
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw()
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, rescale=True):
""" Redraws the figure
"""
#
self.ax = self.fig.add_subplot(1,1,1)
self.ax.clear()
ymx = 0.
for ii,iwrest in enumerate(self.wrest):
# Velocity
wvobs = (1+self.z) * iwrest
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
gdp = np.where((velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
# Normalize?
if self.norm is False:
per = xstats.basic.perc(self.spec.flux[gdp])
fsplice = per[1] / self.spec.flux[gdp]
else:
fsplice = 1./ self.spec.flux[gdp]
# AODM
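            # Apparent optical depth method: N_a(v) = 3.768e14 * tau_a(v) / (f * lambda[Ang]),
            # with 10**14.5761 ~ 3.768e14 and tau_a = ln(continuum/flux) taken below via np.log(fsplice)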
cst = (10.**14.5761)/(self.lines[ii].atomic['fval']*iwrest.value)
Naodm = np.log(fsplice)*cst
ymx = max(ymx,np.max(Naodm))
# Plot
line, = self.ax.plot(velo[gdp], Naodm, '-', drawstyle='steps-mid')
# Labels
lbl = '{:g}'.format(iwrest)
clr = plt.getp(line, 'color')
self.ax.text(0.1, 1.-(0.05+0.05*ii), lbl, color=clr,
transform=self.ax.transAxes, size='small', ha='left')
self.ax.set_xlabel('Relative Velocity (km/s)')
self.ax.set_ylabel('N(AODM)')
# Zero line
self.ax.plot( [0., 0.], [-1e29, 1e29], ':', color='gray')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
if rescale:
self.psdict['ymnx'] = [0.05*ymx, ymx*1.1]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.set_ylim(self.psdict['ymnx'])
# Fonts
#xputils.set_fontsize(self.ax,6.)
# Draw
self.canvas.draw()
# ######
# Plot Doublet
def set_doublet(iself,event):
''' Set z and plot doublet
'''
wv_dict = {'C': (1548.195, 1550.770, 'CIV'), 'M': (2796.352, 2803.531, 'MgII'),
'O': (1031.9261, 1037.6167, 'OVI'), '8': (770.409, 780.324, 'NeVIII'),
'B': (1025.4433, 1215.6701, 'Lyba')}
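    # rest wavelengths (Angstroms) of each doublet; the first member sets the redshift below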
wrest = wv_dict[event.key]
# Set z
iself.zabs = event.xdata/wrest[0] - 1.
try:
iself.statusBar().showMessage('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
except AttributeError:
print('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
return np.array(wrest[0:2])*(1.+iself.zabs)
# ######
# Navigate
def navigate(psdict,event,init=False):
''' Method to Navigate spectrum
init: (False) Initialize
Just pass back valid key strokes
'''
# Initalize
if init is True:
return ['l','r','b','t','i','I', 'o','O', '[',']','W','Z', 'Y', '{', '}']
#
if (not isinstance(event.xdata,float)) or (not isinstance(event.ydata,float)):
print('Navigate: You entered the {:s} key out of bounds'.format(event.key))
return 0
if event.key == 'l': # Set left
psdict['xmnx'][0] = event.xdata
elif event.key == 'r': # Set Right
psdict['xmnx'][1] = event.xdata
elif event.key == 'b': # Set Bottom
psdict['ymnx'][0] = event.ydata
elif event.key == 't': # Set Top
psdict['ymnx'][1] = event.ydata
elif event.key == 'i': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/4.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
elif event.key == 'I': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/16.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
    elif event.key == 'o': # Zoom out (and center)
deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
    elif event.key == 'O': # Zoom out more (and center)
deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
psdict['xmnx'] = [event.xdata-2*deltx, event.xdata+2*deltx]
    elif event.key == 'Y': # Zoom out in y (and center)
delty = psdict['ymnx'][1]-psdict['ymnx'][0]
psdict['ymnx'] = [event.ydata-delty, event.ydata+delty]
elif event.key in ['[',']','{','}']: # Pan
center = (psdict['xmnx'][1]+psdict['xmnx'][0])/2.
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/2.
if event.key == '[':
new_center = center - deltx
elif event.key == ']':
new_center = center + deltx
elif event.key == '{':
new_center = center - 4*deltx
elif event.key == '}':
new_center = center + 4*deltx
psdict['xmnx'] = [new_center-deltx, new_center+deltx]
elif event.key == 'W': # Reset the Window
psdict['xmnx'] = psdict['sv_xy'][0]
psdict['ymnx'] = psdict['sv_xy'][1]
elif event.key == 'Z': # Zero
psdict['ymnx'][0] = 0.
else:
if not (event.key in ['shift']):
rstr = 'Key {:s} not supported.'.format(event.key)
print(rstr)
return 0
return 1
# ######
#
def set_llist(llist,in_dict=None):
''' Method to set a line list dict for the Widgets
'''
if in_dict is None:
in_dict = {}
if isinstance(llist,str) or isinstance(llist,unicode): # Set line list from a file
in_dict['List'] = llist
if llist == 'None':
in_dict['Plot'] = False
else:
in_dict['Plot'] = True
# Load?
if not (llist in in_dict):
#line_file = xa_path+'/data/spec_lines/'+llist
llist_cls = xspec.abs_line.Abs_Line_List(llist)
in_dict[llist] = llist_cls.data
elif isinstance(llist,list): # Set from a list of wrest
from astropy.table import Column
in_dict['List'] = 'input.lst'
in_dict['Plot'] = True
# Fill
llist.sort()
tmp_dict = {}
# Parse from grb.lst
line_file = xa_path+'/data/spec_lines/grb.lst'
llist_cls = xspec.abs_line.Abs_Line_List(line_file)
adict = llist_cls.data
# Fill
names = []
fval = []
for wrest in llist:
mt = np.where(np.abs(wrest-adict['wrest']) < 1e-3)[0]
if len(mt) != 1:
raise ValueError('Problem!')
names.append(adict['name'][mt][0])
fval.append(adict['fval'][mt][0])
# Set
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Generate a Table
col0 = Column(np.array(llist), name='wrest', unit=u.AA) # Assumed Angstroms
col1 = Column(np.array(names), name='name')
col2 = Column(np.array(fval), name='fval')
in_dict['input.lst'] = Table( (col0,col1,col2) )
# Return
return in_dict
# Read spectrum, pass back it and spec_file name
def read_spec(ispec, second_file=None):
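    # NOTE: relies on xspec (xastropy.spec), xstats, Spectrum1D and StdDevUncertainty
    # being available at module scope; they are assumed to be imported elsewhere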
#
if isinstance(ispec,str) or isinstance(ispec,unicode):
spec_fil = ispec
spec = xspec.readwrite.readspec(spec_fil)
# Second file?
if not second_file is None:
spec2 = xspec.readwrite.readspec(second_file)
# Scale for convenience of plotting
xper1 = xstats.basic.perc(spec.flux, per=0.9)
xper2 = xstats.basic.perc(spec2.flux, per=0.9)
scl = xper1[1]/xper2[1]
# Stitch together
wave3 = np.append(spec.dispersion, spec2.dispersion)
flux3 = np.append(spec.flux, spec2.flux*scl)
sig3 = np.append(spec.sig, spec2.sig*scl)
#xdb.set_trace()
spec3 = Spectrum1D.from_array(wave3, flux3, uncertainty=StdDevUncertainty(sig3))
# Overwrite
spec = spec3
spec.filename = spec_fil
else:
spec = ispec # Assuming Spectrum1D
spec_fil = spec.filename # Grab from Spectrum1D
# Return
return spec, spec_fil
# ################
# TESTING
if __name__ == "__main__":
from xastropy import spec as xspec
flg_fig = 0
flg_fig += 2**0 # ExamineSpecWidget
#flg_fig += 2**1 # PlotLinesWidget
#flg_fig += 2**2 # SelectLineWidget
#flg_fig += 2**3 # AbsSysWidget
#flg_fig += 2**4 # VelPltWidget
#flg_fig += 2**5 # SelectedLinesWidget
#flg_fig += 2**6 # AODMWidget
# ExamineSpec
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = xspec.readwrite.readspec(spec_fil)
app.setApplicationName('XSpec')
main = ExamineSpecWidget(spec)
main.show()
sys.exit(app.exec_())
# PltLineWidget
if (flg_fig % 2**2) >= 2**1:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('PltLine')
main = PlotLinesWidget()
main.show()
sys.exit(app.exec_())
# SelectLineWidget
if (flg_fig % 2**3) >= 2**2:
line_file = xa_path+'/data/spec_lines/grb.lst'
llist_cls = xspec.abs_line.Abs_Line_List(line_file)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectLine')
main = SelectLineWidget(llist_cls.data)
main.show()
app.exec_()
print(main.line)
sys.exit()
# AbsSys Widget
if (flg_fig % 2**4) >= 2**3:
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2319-1040_z2.675_id.fits'
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AbsSys')
main = AbsSysWidget([abs_fil,abs_fil2])
main.show()
sys.exit(app.exec_())
# VelPlt Widget
if (flg_fig % 2**5) >= 2**4:
specf = 1
if specf == 0: # PH957 DLA
# Spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = xspec.readwrite.readspec(spec_fil)
# Abs_sys
abs_sys = xiaa.Generic_System(None)
abs_sys.clm_fil = '/Users/xavier/DLA/Abund/PH957.z2309.clm'
abs_sys.get_ions(skip_ions=True, fill_lines=True)
abs_sys.zabs = abs_sys.clm_analy.zsys
elif specf == 1: # UM184 LLS
# Spectrum
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = xspec.readwrite.readspec(spec_fil)
# Abs_sys
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/UM184_z2.930_id.fits'
abs_sys = xiaa.Generic_System(None)
abs_sys.parse_absid_file(abs_fil)
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('VelPlot')
main = VelPlotWidget(spec, abs_sys=abs_sys)
main.show()
sys.exit(app.exec_())
# SelectedLines Widget
if (flg_fig % 2**6) >= 2**5:
llist = set_llist('grb.lst')
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectedLines')
main = SelectedLinesWidget(llist['grb.lst'])
main.show()
sys.exit(app.exec_())
# AODM Widget
if (flg_fig % 2**7) >= 2**6:
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = xspec.readwrite.readspec(spec_fil)
z=2.96916
lines = np.array([1548.195, 1550.770]) * u.AA
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = AODMWidget(spec, z, lines)
main.show()
sys.exit(app.exec_())
| bsd-3-clause |
equialgo/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares objective. The penalty `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
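    # refit on several noisy perturbations of the training inputs to visualise
    # how much the fitted line varies for each estimator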
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
mfouesneau/faststats | faststats/figrc.py | 1 | 24585 |
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.getenv('HOME') + '/bin/python/libs')
# just in case notebook was not launched with the option
#%pylab inline
import pylab as plt
import numpy as np
from scipy import sparse
from matplotlib.mlab import griddata
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Ellipse
try:
import faststats
except ImportError:
faststats = None
#===============================================================================
#============== FIGURE SETUP FUNCTIONS =========================================
#===============================================================================
def theme(ax=None, minorticks=False):
""" update plot to make it nice and uniform """
from matplotlib.ticker import AutoMinorLocator
from pylab import rcParams, gca, tick_params
if minorticks:
if ax is None:
ax = gca()
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.xaxis.set_minor_locator(AutoMinorLocator())
tick_params(which='both', width=rcParams['lines.linewidth'])
def steppify(x, y):
""" Steppify a curve (x,y). Useful for manually filling histograms """
dx = 0.5 * (x[1:] + x[:-1])
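    # dx holds the bin midpoints; x and y values are then duplicated/interleaved
    # so the curve is drawn as flat steps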
xx = np.zeros( 2 * len(dx), dtype=float)
yy = np.zeros( 2 * len(y), dtype=float)
xx[0::2], xx[1::2] = dx, dx
yy[0::2], yy[1::2] = y, y
xx = np.concatenate(([x[0] - (dx[0] - x[0])], xx, [x[-1] + (x[-1] - dx[-1])]))
return xx, yy
def colorify(data, vmin=None, vmax=None, cmap=plt.cm.Spectral):
""" Associate a color map to a quantity vector """
import matplotlib.colors as colors
_vmin = vmin or min(data)
_vmax = vmax or max(data)
    cNorm = colors.Normalize(vmin=_vmin, vmax=_vmax)
scalarMap = plt.cm.ScalarMappable(norm=cNorm, cmap=cmap)
colors = map(scalarMap.to_rgba, data)
return colors, scalarMap
def hist_with_err(x, xerr, bins=None, normed=False, step=False, **kwargs):
from scipy import integrate
#check inputs
assert( len(x) == len(xerr) ), 'data size mismatch'
_x = np.asarray(x).astype(float)
_xerr = np.asarray(xerr).astype(float)
#def the evaluation points
if (bins is None) | (not hasattr(bins, '__iter__')):
m = (_x - _xerr).min()
M = (_x + _xerr).max()
dx = M - m
m -= 0.2 * dx
M += 0.2 * dx
if bins is not None:
N = int(bins)
else:
N = 10
_xp = np.linspace(m, M, N)
else:
_xp = 0.5 * (bins[1:] + bins[:-1])
def normal(v, mu, sig):
        norm_pdf = 1. / (np.sqrt(2. * np.pi) * sig) * np.exp(-0.5 * ((v - mu) / sig) ** 2)
return norm_pdf / integrate.simps(norm_pdf, v)
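    # sum one Gaussian per data point (width = that point's error): an error-weighted, KDE-like density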
_yp = np.array([normal(_xp, xk, xerrk) for xk, xerrk in zip(_x, _xerr) ]).sum(axis=0)
if normed:
_yp /= integrate.simps(_yp, _xp)
if step:
return steppify(_xp, _yp)
else:
return _xp, _yp
def hist_with_err_bootstrap(x, xerr, bins=None, normed=False, nsample=50, step=False, **kwargs):
x0, y0 = hist_with_err(x, xerr, bins=bins, normed=normed, step=step, **kwargs)
yn = np.empty( (nsample, len(y0)), dtype=float)
yn[0, :] = y0
for k in range(nsample - 1):
idx = np.random.randint(0, len(x), len(x))
        yn[k + 1, :] = hist_with_err(x[idx], xerr[idx], bins=bins, normed=normed, step=step, **kwargs)[1]
return x0, yn
def __get_hesse_bins__(_x, _xerr=0., bins=None, margin=0.2):
if (bins is None) | (not hasattr(bins, '__iter__')):
m = (_x - _xerr).min()
M = (_x + _xerr).max()
dx = M - m
m -= margin * dx
M += margin * dx
if bins is not None:
N = int(bins)
else:
N = 10
_xp = np.linspace(m, M, N)
else:
_xp = 0.5 * (bins[1:] + bins[:-1])
return _xp
def scatter_contour(x, y,
levels=10,
bins=40,
threshold=50,
log_counts=False,
histogram2d_args={},
plot_args={},
contour_args={},
ax=None):
"""Scatter plot with contour over dense regions
Parameters
----------
x, y : arrays
x and y data for the contour plot
levels : integer or array (optional, default=10)
number of contour levels, or array of contour levels
    bins : int or sequence (optional, default=40)
        binning passed to numpy.histogram2d (unless overridden in histogram2d_args)
    threshold : float (default=50)
        number of points per 2D bin at which to begin drawing contours
    log_counts : boolean (optional)
if True, contour levels are the base-10 logarithm of bin counts.
histogram2d_args : dict
keyword arguments passed to numpy.histogram2d
see doc string of numpy.histogram2d for more information
plot_args : dict
keyword arguments passed to pylab.scatter
see doc string of pylab.scatter for more information
    contour_args : dict
keyword arguments passed to pylab.contourf
see doc string of pylab.contourf for more information
ax : pylab.Axes instance
the axes on which to plot. If not specified, the current
axes will be used
"""
if ax is None:
ax = plt.gca()
    # honour the `bins` argument unless histogram2d_args already specifies one
    h2d_args = dict(histogram2d_args)
    h2d_args.setdefault('bins', bins)
    H, xbins, ybins = np.histogram2d(x, y, **h2d_args)
if log_counts:
H = np.log10(1 + H)
threshold = np.log10(1 + threshold)
levels = np.asarray(levels)
if levels.size == 1:
levels = np.linspace(threshold, H.max(), levels)
extent = [xbins[0], xbins[-1], ybins[0], ybins[-1]]
i_min = np.argmin(levels)
# draw a zero-width line: this gives us the outer polygon to
# reduce the number of points we draw
# somewhat hackish... we could probably get the same info from
# the filled contour below.
outline = ax.contour(H.T, levels[i_min:i_min + 1],
linewidths=0, extent=extent)
try:
outer_poly = outline.allsegs[0][0]
ax.contourf(H.T, levels, extent=extent, **contour_args)
X = np.hstack([x[:, None], y[:, None]])
try:
# this works in newer matplotlib versions
from matplotlib.path import Path
points_inside = Path(outer_poly).contains_points(X)
except:
# this works in older matplotlib versions
import matplotlib.nxutils as nx
points_inside = nx.points_inside_poly(X, outer_poly)
Xplot = X[~points_inside]
ax.plot(Xplot[:, 0], Xplot[:, 1], zorder=1, **plot_args)
except IndexError:
ax.plot(x, y, zorder=1, **plot_args)
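# Illustrative sketch of calling scatter_contour (not executed on import); the data and
# keyword choices below are assumptions, and a matplotlib backend (or a later savefig)
# is assumed to be available.
def _example_scatter_contour():
    xs = np.random.normal(0., 1., 5000)
    ys = 0.5 * xs + np.random.normal(0., 0.5, 5000)
    fig, ax = plt.subplots()
    scatter_contour(xs, ys, levels=8, bins=40, threshold=20,
                    plot_args={'marker': '.', 'color': 'k'},
                    contour_args={'cmap': plt.cm.Greys},
                    ax=ax)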
def latex_float(f, precision=0.2, delimiter=r'\times'):
float_str = ("{0:" + str(precision) + "g}").format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return (r"{0}" + delimiter + "10^{{{1}}}").format(base, int(exponent))
else:
return float_str
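# Quick sketch of how latex_float formats numbers (illustrative; the exact strings
# depend on Python's "g" formatting with the default precision=0.2):
#   latex_float(0.00052)  -> '0.00052'             (no exponent, returned as-is)
#   latex_float(5.2e-07)  -> r'5.2\times10^{-7}'   (exponent split out for LaTeX)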
#===============================================================================
#===============================================================================
#===============================================================================
def ezrc(fontSize=22., lineWidth=2., labelSize=None, tickmajorsize=10, tickminorsize=5):
"""
    Define rc parameters to make pretty figures for slides
"""
from pylab import rc, rcParams
if labelSize is None:
labelSize = fontSize + 5
rc('figure', figsize=(8, 6))
rc('lines', linewidth=lineWidth)
rcParams['grid.linewidth'] = lineWidth
rc('font', size=fontSize, family='serif', weight='normal')
rc('axes', linewidth=lineWidth, labelsize=labelSize)
#rc('xtick', width=2.)
#rc('ytick', width=2.)
#rc('legend', fontsize='x-small', borderpad=0.1, markerscale=1.,
rc('legend', borderpad=0.1, markerscale=1., fancybox=False)
rc('text', usetex=True)
rc('image', aspect='auto')
rc('ps', useafm=True, fonttype=3)
rcParams['xtick.major.size'] = tickmajorsize
rcParams['xtick.minor.size'] = tickminorsize
rcParams['ytick.major.size'] = tickmajorsize
rcParams['ytick.minor.size'] = tickminorsize
rcParams['font.sans-serif'] = 'Helvetica'
rcParams['font.serif'] = 'Helvetica'
#rcParams['text.latex.preamble'] = '\usepackage{pslatex}'
def hide_axis(where, ax=None):
ax = ax or plt.gca()
if type(where) == str:
_w = [where]
else:
_w = where
[sk.set_color('None') for k, sk in ax.spines.items() if k in _w ]
if 'top' in _w and 'bottom' in _w:
ax.xaxis.set_ticks_position('none')
elif 'top' in _w:
ax.xaxis.set_ticks_position('bottom')
elif 'bottom' in _w:
ax.xaxis.set_ticks_position('top')
if 'left' in _w and 'right' in _w:
ax.yaxis.set_ticks_position('none')
elif 'left' in _w:
ax.yaxis.set_ticks_position('right')
elif 'right' in _w:
ax.yaxis.set_ticks_position('left')
plt.draw_if_interactive()
def despine(fig=None, ax=None, top=True, right=True,
left=False, bottom=False):
"""Remove the top and right spines from plot(s).
fig : matplotlib figure
figure to despine all axes of, default uses current figure
ax : matplotlib axes
specific axes object to despine
top, right, left, bottom : boolean
if True, remove that spine
"""
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
ax_i.spines[side].set_visible(not locals()[side])
def shift_axis(which, delta, where='outward', ax=None):
ax = ax or plt.gca()
if type(which) == str:
_w = [which]
else:
_w = which
scales = (ax.xaxis.get_scale(), ax.yaxis.get_scale())
lbls = (ax.xaxis.get_label(), ax.yaxis.get_label())
for wk in _w:
ax.spines[wk].set_position((where, delta))
ax.xaxis.set_scale(scales[0])
ax.yaxis.set_scale(scales[1])
ax.xaxis.set_label(lbls[0])
ax.yaxis.set_label(lbls[1])
plt.draw_if_interactive()
class AutoLocator(MaxNLocator):
def __init__(self, nbins=9, steps=[1, 2, 5, 10], **kwargs):
MaxNLocator.__init__(self, nbins=nbins, steps=steps, **kwargs )
def setMargins(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):
"""
    Tune the subplot layout. The parameter meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
plt.draw_if_interactive()
def setNmajors(xval=None, yval=None, ax=None, mode='auto', **kwargs):
"""
setNmajors - set major tick number
see figure.MaxNLocator for kwargs
"""
if ax is None:
ax = plt.gca()
if (mode == 'fixed'):
if xval is not None:
ax.xaxis.set_major_locator(MaxNLocator(xval, **kwargs))
if yval is not None:
ax.yaxis.set_major_locator(MaxNLocator(yval, **kwargs))
elif (mode == 'auto'):
if xval is not None:
ax.xaxis.set_major_locator(AutoLocator(xval, **kwargs))
if yval is not None:
ax.yaxis.set_major_locator(AutoLocator(yval, **kwargs))
plt.draw_if_interactive()
def crazy_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None, reinterp=None):
"""
Compute the sparse bi-dimensional histogram of two data samples where *x*,
and *y* are 1-D sequences of the same length. If *weights* is None
    (default), this is a histogram of the number of occurrences of the
observations at (x[i], y[i]).
If *weights* is specified, it specifies values at the coordinate (x[i],
y[i]). These values are accumulated for each bin and then reduced according
to *reduce_w* function, which defaults to numpy's sum function (np.sum).
(If *weights* is specified, it must also be a 1-D sequence of the same
length as *x* and *y*.)
    INPUTS:
        x         ndarray[ndim=1]  first data sample coordinates
        y         ndarray[ndim=1]  second data sample coordinates
    KEYWORDS:
        bins      the bin specification
                     int           the number of bins for the two dimensions (nx = ny = bins)
                  or [int, int]    the number of bins in each dimension (nx, ny = bins)
        weights   ndarray[ndim=1]  values *w_i* weighing each sample *(x_i, y_i)*,
                  accumulated and reduced (using reduce_w) per bin
        reduce_w  callable         function that will reduce the *weights* values accumulated per bin
                                   defaults to numpy's sum function (np.sum)
        NULL      value type       filling value for missing data
        reinterp  str              values are [None, 'nn', 'linear']
                                   if set, reinterpolation is made using mlab.griddata to fill missing data
                                   within the convex polygon that encloses the data
    OUTPUTS:
        B         ndarray[ndim=2]  bi-dimensional histogram
        extent    tuple(4)         (xmin, xmax, ymin, ymax) extension of the histogram
        steps     tuple(2)         (dx, dy) bin size in x and y direction
"""
# define the bins (do anything you want here but needs edges and sizes of the 2d bins)
try:
nx, ny = bins
except TypeError:
nx = ny = bins
#values you want to be reported
if weights is None:
weights = np.ones(x.size)
if reduce_w is None:
reduce_w = np.sum
else:
if not hasattr(reduce_w, '__call__'):
raise TypeError('reduce function is not callable')
# culling nans
finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))
_x = np.asarray(x)[finite_inds]
_y = np.asarray(y)[finite_inds]
_w = np.asarray(weights)[finite_inds]
if not (len(_x) == len(_y)) & (len(_y) == len(_w)):
raise ValueError('Shape mismatch between x, y, and weights: {}, {}, {}'.format(_x.shape, _y.shape, _w.shape))
xmin, xmax = _x.min(), _x.max()
ymin, ymax = _y.min(), _y.max()
dx = (xmax - xmin) / (nx - 1.0)
dy = (ymax - ymin) / (ny - 1.0)
# Basically, this is just doing what np.digitize does with one less copy
xyi = np.vstack((_x, _y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = np.floor(xyi, xyi).T
#xyi contains the bins of each point as a 2d array [(xi,yi)]
d = {}
for e, k in enumerate(xyi.T):
key = (k[0], k[1])
if key in d:
d[key].append(_w[e])
else:
d[key] = [_w[e]]
    _xyi = np.array(list(d.keys())).T
    _w = np.array([reduce_w(v) for v in d.values()])
# exploit a sparse coo_matrix to build the 2D histogram...
_grid = sparse.coo_matrix((_w, _xyi), shape=(nx, ny))
if reinterp is None:
#convert sparse to array with filled value
## grid.toarray() does not account for filled value
## sparse.coo.coo_todense() does actually add the values to the existing ones, i.e. not what we want -> brute force
if NULL is None:
B = _grid.toarray()
        else: # Brute force only when needed
B = np.zeros(_grid.shape, dtype=_grid.dtype)
B.fill(NULL)
for (x, y, v) in zip(_grid.col, _grid.row, _grid.data):
B[y, x] = v
else: # reinterp
xi = np.arange(nx, dtype=float)
yi = np.arange(ny, dtype=float)
B = griddata(_grid.col.astype(float), _grid.row.astype(float), _grid.data, xi, yi, interp=reinterp)
return B, (xmin, xmax, ymin, ymax), (dx, dy)
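# Minimal usage sketch of crazy_histogram2d (illustrative, never called here); it
# assumes `sparse` (scipy.sparse) is imported at the top of this module, as the routine
# itself requires, and the data below are made up.
def _example_crazy_histogram2d():
    xs = np.random.uniform(0., 10., 10000)
    ys = np.random.uniform(0., 5., 10000)
    B, extent, steps = crazy_histogram2d(xs, ys, bins=(50, 25))
    # display using the extent returned by the routine
    plt.imshow(B.T, origin='lower', extent=extent, aspect='auto')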
def histplot(data, bins=10, range=None, normed=False, weights=None, density=None, ax=None, **kwargs):
""" plot an histogram of data `a la R`: only bottom and left axis, with
dots at the bottom to represent the sample
Example
-------
import numpy as np
    x = np.random.normal(0, 1, 1000)
histplot(x, bins=50, density=True, ls='steps-mid')
"""
h, b = np.histogram(data, bins, range, normed, weights, density)
if ax is None:
ax = plt.gca()
x = 0.5 * (b[:-1] + b[1:])
l = ax.plot(x, h, **kwargs)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
_w = ['top', 'right']
[ ax.spines[side].set_visible(False) for side in _w ]
for wk in ['bottom', 'left']:
ax.spines[wk].set_position(('outward', 10))
ylim = ax.get_ylim()
ax.plot(data, -0.02 * max(ylim) * np.ones(len(data)), '|', color=l[0].get_color())
ax.set_ylim(-0.02 * max(ylim), max(ylim))
def scatter_plot(x, y, ellipse=False, levels=[0.99, 0.95, 0.68], color='w', ax=None, **kwargs):
if ax is None:
ax = plt.gca()
if faststats is not None:
im, e = faststats.fastkde.fastkde(x, y, (50, 50), adjust=2.)
V = im.max() * np.asarray(levels)
        ax.contour(im.T, levels=V, origin='lower', extent=e, linewidths=[1, 2, 3], colors=color)
ax.plot(x, y, 'b,', alpha=0.3, zorder=-1, rasterized=True)
if ellipse is True:
data = np.vstack([x, y])
mu = np.mean(data, axis=1)
cov = np.cov(data)
        error_ellipse(mu, cov, ax=ax, edgecolor="g", ls="dashed", lw=4, zorder=2)
def error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):
"""
Plot the error ellipse at a point given its covariance matrix.
"""
# some sane defaults
facecolor = kwargs.pop('facecolor', 'none')
edgecolor = kwargs.pop('edgecolor', 'k')
x, y = mu
U, S, V = np.linalg.svd(cov)
theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
ellipsePlot = Ellipse(xy=[x, y],
width=2 * np.sqrt(S[0]) * factor,
height=2 * np.sqrt(S[1]) * factor,
angle=theta,
facecolor=facecolor, edgecolor=edgecolor, **kwargs)
if ax is None:
ax = plt.gca()
ax.add_patch(ellipsePlot)
return ellipsePlot
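# Illustrative sketch pairing error_ellipse with a sample covariance (not executed on
# import); it assumes matplotlib.patches.Ellipse is imported at the top of this module,
# as error_ellipse itself requires, and the data are made up.
def _example_error_ellipse():
    data = np.random.multivariate_normal([0., 0.], [[2., 0.8], [0.8, 1.]], 1000)
    mu = data.mean(axis=0)
    cov = np.cov(data.T)
    ax = plt.gca()
    ax.plot(data[:, 0], data[:, 1], ',', alpha=0.3)
    # factor=2 draws the ellipse at twice the principal-axis scale
    error_ellipse(mu, cov, ax=ax, factor=2.0, edgecolor='r')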
def plotCorr(l, pars, plotfunc=None, lbls=None, triangle='lower', *args, **kwargs):
""" Plot correlation matrix between variables
inputs
-------
l: dict
dictionary of variables (could be a Table)
pars: sequence of str
parameters to use
plotfunc: callable
function to be called when doing the scatter plots
lbls: sequence of str
        sequence of strings to use instead of dictionary keys
triangle: str in ['upper', 'lower']
Which side of the triangle to use.
*args, **kwargs are forwarded to the plot function
Example
-------
import numpy as np
figrc.ezrc(16, 1, 16, 5)
d = {}
for k in range(4):
        d[k] = np.random.normal(0, k + 1, 10000)
plt.figure(figsize=(8 * 1.5, 7 * 1.5))
plotCorr(d, d.keys(), plotfunc=figrc.scatter_plot)
#plotCorr(d, d.keys(), alpha=0.2)
"""
if lbls is None:
lbls = pars
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
    if (len(pars) - 1) not in fontmap:
fontmap[len(pars) - 1] = 3
k = 1
axes = np.empty((len(pars) + 1, len(pars)), dtype=object)
for j in range(len(pars)):
for i in range(len(pars)):
if j > i:
sharex = axes[j - 1, i]
else:
sharex = None
if i == j:
# Plot the histograms.
ax = plt.subplot(len(pars), len(pars), k)
axes[j, i] = ax
n, b, p = ax.hist(l[pars[i]], bins=50, histtype="step", color=kwargs.get("color", "b"))
if triangle == 'upper':
ax.set_xlabel(lbls[i])
ax.set_ylabel(lbls[i])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks_position('right')
ax.xaxis.set_ticks_position('bottom')
if triangle == 'upper':
if i > j:
if i > j + 1:
sharey = axes[j, i - 1]
else:
sharey = None
ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
axes[j, i] = ax
if plotfunc is None:
plt.plot(l[pars[i]], l[pars[j]], ',', **kwargs)
else:
plotfunc(l[pars[i]], l[pars[j]], ax=ax, *args, **kwargs)
plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
if triangle == 'lower':
if i < j:
                    # share the y axis with the panel to the left, if there is one
                    if i > 0:
                        sharey = axes[j, i - 1]
                    else:
                        sharey = None
ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
axes[j, i] = ax
if plotfunc is None:
plt.plot(l[pars[i]], l[pars[j]], ',', **kwargs)
else:
plotfunc(l[pars[i]], l[pars[j]], ax=ax, *args, **kwargs)
plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
if i == 0:
ax.set_ylabel(lbls[j])
plt.setp(ax.get_yticklabels(), visible=True)
if j == len(pars) - 1:
ax.set_xlabel(lbls[i])
plt.setp(ax.get_xticklabels(), visible=True)
N = int(0.5 * fontmap[len(pars) - 1])
if N <= 4:
N = 5
setNmajors(N, N, ax=ax, prune='both')
k += 1
setMargins(hspace=0.0, wspace=0.0)
def hinton(W, bg='grey', facecolors=('w', 'k')):
"""Draw a hinton diagram of the matrix W on the current pylab axis
Hinton diagrams are a way of visualizing numerical values in a matrix/vector,
popular in the neural networks and machine learning literature. The area
occupied by a square is proportional to a value's magnitude, and the colour
indicates its sign (positive/negative).
Example usage:
R = np.random.normal(0, 1, (2,1000))
h, ex, ey = np.histogram2d(R[0], R[1], bins=15)
hh = h - h.T
hinton.hinton(hh)
"""
M, N = W.shape
square_x = np.array([-.5, .5, .5, -.5])
square_y = np.array([-.5, -.5, .5, .5])
ioff = False
if plt.isinteractive():
plt.ioff()
ioff = True
plt.fill([-.5, N - .5, N - .5, - .5], [-.5, -.5, M - .5, M - .5], bg)
Wmax = np.abs(W).max()
for m, Wrow in enumerate(W):
for n, w in enumerate(Wrow):
            c = facecolors[1] if np.signbit(w) else facecolors[0]
plt.fill(square_x * w / Wmax + n, square_y * w / Wmax + m, c, edgecolor=c)
plt.ylim(-0.5, M - 0.5)
    plt.xlim(-0.5, N - 0.5)
if ioff is True:
plt.ion()
plt.draw_if_interactive()
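# Minimal usage sketch for hinton (illustrative only, never called here); the weight
# matrix below is made up.
def _example_hinton():
    W = np.random.normal(0., 1., (10, 12))
    hinton(W)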
| mit |
jljones/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
    except IndexError:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
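# Minimal usage sketch (illustrative only, not executed on import): feed the model's
# predicted probabilities and the observed 0/1 outcomes; the data below are simulated.
def _example_separation_plot():
    n = 200
    p_hat = np.random.uniform(0., 1., n)
    y_obs = (np.random.uniform(0., 1., n) < p_hat).astype(int)
    separation_plot(p_hat, y_obs)
    plt.show()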
| mit |
hdmetor/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
ste616/atca | atsenscalc/code/atsenscalc_routines.py | 1 | 41093 | ######################################################################
# The ATCA Sensitivity Calculator
# Calculation subroutines.
# Copyright 2015 Jamie Stevens, CSIRO
#
# This file is part of the ATCA Sensitivity Calculator.
#
# The ATCA Sensitivity Calculator is free software: you can
# redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# The ATCA Sensitivity Calculator is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ATCA Sensitivity Calculator.
# If not, see <http://www.gnu.org/licenses/>.
import os
import math
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import refract as refract
# Define some global parameters.
frequencyBands = {
# Low and high frequencies for each of the ATCA receivers, and the file to
# read to get Tsys information.
'16cm': { 'low': 1730, 'high': 2999,
'tsys': "systemps/ca02_21cm_x_polarisation.avg" },
'4cm': { 'low': 4928, 'high': 10928,
'tsys': "systemps/ca02_4cm_y_polarisation.avg" },
'15mm': { 'low': 16000, 'high': 25000,
'tsys': "systemps/12mm_recvtemps.avg" },
'7mm': { 'low': 30000, 'high': 50000,
'tsys': "systemps/ca02_7mm.avg" },
'3mm': { 'low': 83857, 'high': 104785,
'tsys': "systemps/nominal_3mm.avg" }
}
sideBands = {
# Which sideband is used per frequency range.
'USB': [ [ 4928, 10928 ], [ 41000, 50000 ], [ 97800, 104785 ] ],
'LSB': [ [ 1730, 2999 ], [ 16000, 25000 ], [ 30000, 40999 ],
[ 83857, 97799 ] ]
}
continuumBandwidth = 2049.0 # MHz bandwidth of the continuum bands.
nZoomChannels = 2048 # the number of channels each zoom has
antennaDiameter = 22.0 # metres
speedoflight = 299792458.0 # Speed of light, m/s
boltzmann = 1.3806488e-23 # J / K
latitudeATCA = -30.31288472 # degrees, latitude of the ATCA.
longitudeATCA = 149.5501388 # degrees, longitude of the ATCA.
eastAngle = longitudeATCA - 90.0
cangle = math.radians(eastAngle)
cosl = math.cos(math.radians(latitudeATCA))
sinl = math.sin(math.radians(latitudeATCA))
channelFlagging = {
# The channels always flagged in the continuum band.
'continuum': { 'CFB1M': [ 513, 1025, 1537 ],
'CFB64M': [ 9, 17, 25 ] },
# The sampling clock birdies.
'birdies': { 'CFB1M': [ 129, 157, 257, 641, 769, 1153, 1177, 1281, 1409, 1793, 1921 ] }
}
frequencyFlagging = {
# Known RFI ranges.
'rfi': [ [ 1059.0, 1075.0 ], [ 1103.0, 1117.0 ], [ 1145.0, 1159.0 ], [ 1165.0, 1191.0 ],
[ 1217.0, 1239.0 ], [ 1240.0, 1252.0 ], [ 1380.0, 1382.0 ], [ 1428.0, 1432.0 ],
[ 1436.0, 1450.0 ], [ 1456.0, 1460.0 ], [ 1493.0, 1495.0 ], [ 1499.0, 1511.0 ],
[ 1525.0, 1628.0 ], [ 2489.0, 2496.0 ], [ 2879.0, 2881.0 ], [ 5622.0, 5628.0 ],
[ 5930.0, 5960.0 ], [ 6440.0, 6480.0 ], [ 7747.0, 7777.0 ], [ 7866.0, 7896.0 ],
[ 8058.0, 8088.0 ], [ 8177.0, 8207.0 ] ]
}
# Some conversion factors.
mhzToHz = 1.0e6 # Convert MHz to Hz
degreesToArcmin = 60.0 # Convert degrees to arcminutes
degreesToArcsec = 3600.0 # Convert degrees to arcseconds
jyToSI = 1e-26 # Convert Jy to W.m^-2.s^-1
mToKm = 1e-3 # Convert m to km
# Our error handling class.
class CalcError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def frequencyOverlap(inf1, inf2, ing1, ing2):
# Given two frequency ranges f1 - f2, g1 - g2, returns the amount
# of frequency overlap.
overlap = 0.0
# Check for correct frequency order.
f1 = inf1
f2 = inf2
g1 = ing1
g2 = ing2
if (f2 < f1):
f1 = inf2
f2 = inf1
if (g2 < g1):
g1 = ing2
g2 = ing1
# Assess overlap.
if ((g1 >= f1) and (g1 <= f2)):
a1 = g2 - g1
a2 = f2 - g1
overlap += min(a1, a2)
elif ((g2 >= f1) and (g2 <= f2)):
a1 = g2 - g1
a2 = g2 - f1
overlap += min(a1, a2)
elif ((f1 >= g1) and (f1 <= g2)):
a1 = f2 - f1
a2 = g2 - f1
overlap += min(a1, a2)
elif ((f2 >= g1) and (f2 <= g2)):
a1 = f2 - f1
a2 = f2 - g1
overlap += min(a1, a2)
return overlap
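# Worked sketch of frequencyOverlap (illustrative, never called here): the ranges
# 1000-1100 MHz and 1050-1200 MHz share the 1050-1100 MHz span.
def _exampleFrequencyOverlap():
    return frequencyOverlap(1000.0, 1100.0, 1050.0, 1200.0) # expected ~50.0 MHz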
def frequencyBand(freq):
# Which band does the frequency belong to?
for b in frequencyBands:
if (freq >= frequencyBands[b]['low'] and
freq <= frequencyBands[b]['high']):
return b
return None
def channelResolution(corrMode):
# Return the width of each type of channel for a given correlator mode.
if (corrMode == 'CFB1M'):
return { 'continuum': 1.0, 'zoom': (1.0 / 2048.0) }
elif (corrMode == 'CFB64M'):
return { 'continuum': 64.0, 'zoom': (64.0 / 2048.0) }
else:
return None
def makeTemplate(centreFreq, bandwidth, channelWidth):
# Make a blank spectrum that covers the specified frequency range with
# the correct channel resolution.
c = []
v = []
f = []
n = []
x = []
p = []
if (type(bandwidth) is float):
lowFreq = centreFreq - (bandwidth - channelWidth) / 2
highFreq = centreFreq + (bandwidth - channelWidth) / 2
elif (type(bandwidth) is list):
lowFreq = centreFreq - bandwidth[0]
highFreq = centreFreq + bandwidth[1]
cFreq = lowFreq
chanNum = 1
while (cFreq <= highFreq):
c.append(cFreq)
v.append(0.0)
n.append(0)
f.append(False)
p.append(0.0)
x.append(chanNum)
cFreq += channelWidth
chanNum += 1
nx = np.array(x)
for i in xrange(0, len(sideBands['LSB'])):
if ((centreFreq >= sideBands['LSB'][i][0]) and
(centreFreq <= sideBands['LSB'][i][1])):
# This band is LSB, so we flip the channel numbers.
nx = nx[::-1]
break
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'flaggedBandwidth': p,
'channelWidth': channelWidth, 'channelNumber': nx }
def averageTemplate(template):
# Return the average unflagged value of a template.
varr = np.array([])
for i in xrange(0, len(template['value'])):
if (template['flags'][i] == False):
varr = np.append(varr, template['value'][i])
return np.mean(varr)
def getFreq(item):
return item[0]
def frequencyToWavelength(freq):
# Convert a frequency in MHz to a wavelength in metres.
wl = speedoflight / (freq * mhzToHz)
return (wl)
def baselineToLambda(freq, baselineLength):
    # Convert a baseline length in metres to wavelengths (lambda) for a specified frequency.
wl = frequencyToWavelength(freq)
bl = baselineLength / wl
return (bl)
def primaryBeamSize(freq, diam):
# Calculate the size of the primary beam for an antenna with a specified diameter (m),
# at the specified frequency (MHz), in arcmin.
pbfwhm = math.degrees((speedoflight / diam) / (freq * mhzToHz)) * degreesToArcmin
tpbfwhm = "%.1f" % pbfwhm
return (float(tpbfwhm))
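# Illustrative check of primaryBeamSize (never called here): for a 22 m ATCA antenna
# at an assumed 5500 MHz, lambda/D is about 0.0545 m / 22 m, giving roughly 8.5 arcmin.
def _examplePrimaryBeamSize():
    return primaryBeamSize(5500.0, antennaDiameter) # roughly 8.5 arcmin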
def synthesisedBeamSize(freq, baselineLength, dec, minHa, maxHa, weightFactor):
# Calculate the synthesised beam size given a frequency, baseline length,
# declination and the minimum and maximum hour angles observed.
# Returns an array where minor axis is the first element, and major
# FWHM is the second.
blX = baselineToLambda(freq, baselineLength['dX'])
blY = baselineToLambda(freq, baselineLength['dY'])
blZ = baselineToLambda(freq, baselineLength['dZ'])
# We need the hour angles in radians.
minHaRad = math.radians(minHa * 15.0)
maxHaRad = math.radians(maxHa * 15.0)
# We use the full TMS equation 4.1 here.
# Find the hour angle for maximum u.
umaxHa = math.degrees(math.atan2(blX, blY)) / 15.0
if (umaxHa < minHa):
umaxHa = minHa
if (umaxHa > maxHa):
umaxHa = maxHa
umaxHaRad = math.radians(umaxHa * 15.0)
umax = abs(math.sin(umaxHaRad) * blX + math.cos(umaxHaRad) * blY)
# Find the hour angle for maximum v.
vmaxHa = math.degrees(math.atan2(-1 * blY, blX)) / 15.0
if (vmaxHa < minHa):
vmaxHa = minHa
if (vmaxHa > maxHa):
vmaxHa = maxHa
vmaxHaRad = math.radians(vmaxHa * 15.0)
vmax = abs(-1 * math.sin(math.radians(dec)) * math.cos(vmaxHaRad) * blX +
math.sin(math.radians(dec)) * math.sin(vmaxHaRad) * blY +
math.cos(math.radians(dec)) * blZ)
# The resolution is simply the inverse of umax or vmax, but we also
# convert to arcseconds and multiply by the image weighting factor.
ures = math.degrees(1.0 / umax) * degreesToArcsec * weightFactor
# This next line will trigger a ZeroDivisionError if the source is
# on the celestial equator and we're in an EW array.
vres = math.degrees(1.0 / vmax) * degreesToArcsec * weightFactor
# We now take care of significant figures.
utmp = "%.2f" % ures
vtmp = "%.2f" % vres
ures = float(utmp)
vres = float(vtmp)
# Make the output with the minor axis first.
res = [ ures, vres ]
if (ures > vres):
res = [ vres, ures ]
return (res)
def ellipseArea(minor, major):
# Returns the area of an ellipse with specified minor and major
# axis lengths (in arcseconds), in sr.
mn = (minor / 2.0) / degreesToArcsec
mj = (major / 2.0) / degreesToArcsec
a = math.pi * mn * mj # in square degrees
asr = a / (180.0 / math.pi)**2 # in steradians
return (asr)
def brightnessTemperatureSensitivity(rms, synthBeam, freq):
# Calculate the brightness temperature sensitivity (K) of an observation
# with an RMS noise level (mJy/beam), a synthesised beam size (arcsec),
# and a frequency in MHz. This comes from TMS equation 1.2.
A = ellipseArea(synthBeam[0], synthBeam[1])
I = (rms / 1000.0) * jyToSI / A
wl = frequencyToWavelength(freq)
bt = wl * wl * I / (2.0 * boltzmann)
return (bt)
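# Illustrative sketch tying the noise and beam together (never called here): a
# hypothetical 0.05 mJy/beam RMS with an assumed 2" x 3" synthesised beam at 5500 MHz
# gives the corresponding brightness temperature sensitivity via TMS eqn 1.2.
def _exampleBrightnessTemperature():
    rmsNoise = 0.05     # mJy/beam, assumed value
    beam = [2.0, 3.0]   # arcsec, assumed synthesised beam (minor, major)
    return brightnessTemperatureSensitivity(rmsNoise, beam, 5500.0)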
def readTsys(filename, lf, hf):
# Open the filename.
if (os.path.isfile(filename)):
# Read it.
d = np.loadtxt(filename)
# Sort it.
ds = sorted(d, key=getFreq)
# Split it.
startIndex = 0
endIndex = len(ds) - 1
llf = lf / 1000.0
hhf = hf / 1000.0
while ((ds[startIndex][0] < llf) and
(startIndex < (len(ds) - 1)) and
(ds[startIndex + 1][0] < llf)):
startIndex += 1
while ((ds[endIndex][0] > hhf) and
(endIndex > 0) and
(ds[endIndex - 1][0] > hhf)):
endIndex -= 1
if (startIndex == endIndex):
if (startIndex > 0):
startIndex -= 1
if (endIndex < (len(ds) - 1)):
endIndex += 1
dds = ds[startIndex:(endIndex + 1)]
c = [np.around(row[0] * 1000.0) for row in dds]
v = [(10 ** row[1]) for row in dds]
n = []
f = []
for i in xrange(0, len(v)):
n.append(1)
f.append(False)
return { 'centreFrequency': c, 'value': v, 'count': np.array(n),
'flags': f,
'channelWidth': 1.0 }
else:
raise CalcError("Can't find Tsys file %s." % filename)
def lowHigh(c, w):
return { 'low': c - (w / 2.0), 'high': c + (w / 2.0) }
def overlaps(a, b):
if (b['low'] >= a['high']):
return False
if (b['high'] <= a['low']):
return False
return True
def templateAverage(t):
# Divide the values by the counts.
for i in xrange(0, len(t['centreFrequency'])):
if (t['count'][i] > 0):
t['value'][i] /= float(t['count'][i])
if (isinstance(t['flags'][i], list) == True):
# Multiple input flags, we choose the most common.
ft = 0
ff = 0
for j in xrange(0, len(t['flags'][i])):
if (t['flags'][i][j]):
ft += 1
else:
ff += 1
if (ft > ff):
t['flags'][i] = True
else:
t['flags'][i] = False
def linearInterpolate(p1, p2, pi):
# Using information from p1 and p2, determine the value at pi.
run = p2['frequency'] - p1['frequency']
if (run != 0):
slope = (p2['value'] - p1['value']) / run
else:
slope = 0
nrun = pi['frequency'] - p1['frequency']
return p1['value'] + nrun * slope
def templateInterpolate(t):
# Interpolate values for channels with no counts.
# Get the array for where counts is 0 and not.
zeroes = np.where(t['count'] == 0)
good = np.where(t['count'] > 0)
# Get the arrays without zero counts.
cf = t['centreFrequency'][good]
vs = t['value'][good]
# And the values we need to interpolate for.
rf = t['centreFrequency'][zeroes]
# And then interpolate.
iv = np.interp(rf, cf, vs)
t['value'][zeroes] = iv
def templateFill(srcTemplate, destTemplate):
# Fill in a template spectrum with values from another template, and do it with
# a single pass of each array (no looping).
i = 0 # The index of the destination template bin
j = 0 # The index of the source template bin
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
dfs = lowHigh(destTemplate['centreFrequency'][i], destTemplate['channelWidth'])
while (i < len(destTemplate['centreFrequency']) and
j < len(srcTemplate['centreFrequency'])):
if (overlaps(dfs, sfs)):
destTemplate['value'][i] += srcTemplate['value'][j]
destTemplate['count'][i] += 1
if (destTemplate['count'][i] > 1):
if (destTemplate['count'][i] == 2):
t = [ destTemplate['flags'][i], srcTemplate['flags'][j] ]
destTemplate['flags'][i] = t
else:
destTemplate['flags'][i].append(srcTemplate['flags'][j])
else:
destTemplate['flags'][i] = srcTemplate['flags'][j]
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
elif (srcTemplate['centreFrequency'][j] < destTemplate['centreFrequency'][i]):
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
else:
i += 1
if (i < len(destTemplate['centreFrequency'])):
dfs = lowHigh(destTemplate['centreFrequency'][i], destTemplate['channelWidth'])
templateAverage(destTemplate)
# Check that the edges aren't empty
# Bottom edge.
if (destTemplate['count'][0] == 0):
# Have to interpolate from the source template.
dfs = lowHigh(destTemplate['centreFrequency'][0], destTemplate['channelWidth'])
j = 0
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
while (sfs['high'] < dfs['low']):
j += 1
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
j -= 1 # Because the breaking point is when the frequency goes too high.
i = 1
while (i < (len(destTemplate['count']) - 1) and destTemplate['count'][i] == 0):
i += 1
destTemplate['value'][0] = linearInterpolate({ 'value': srcTemplate['value'][j],
'frequency': srcTemplate['centreFrequency'][j] },
{ 'value': destTemplate['value'][i],
'frequency': destTemplate['centreFrequency'][i] },
{ 'frequency': destTemplate['centreFrequency'][0] } )
destTemplate['count'][0] = 1
# Top edge.
if (destTemplate['count'][-1] == 0):
dfs = lowHigh(destTemplate['centreFrequency'][-1], destTemplate['channelWidth'])
j = 0
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
while (sfs['high'] <= dfs['low']):
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
else:
j -= 1
break
# Break point is fine this time.
i = -2
while (destTemplate['count'][i] == 0 and (abs(i) < (len(destTemplate['centreFrequency'])))):
i -= 1
destTemplate['value'][-1] = linearInterpolate({ 'value': srcTemplate['value'][j],
'frequency': srcTemplate['centreFrequency'][j] },
{ 'value': destTemplate['value'][i],
'frequency': destTemplate['centreFrequency'][i] },
{ 'frequency': destTemplate['centreFrequency'][-1] } )
destTemplate['count'][-1] = 1
templateInterpolate(destTemplate)
def templateEfficiency():
# The template returned by this routine contains all the efficiencies for
# all the bands.
c = [ 900.0, 1200.0, 1500.0, 1800.0, 2100.0, 2300.0, 2500.0, 4400.0, 5900.0,
7400.0, 8800.0, 10600.0, 16000.0, 16500.0, 17000.0, 17500.0, 18000.0, 18500.0,
19000.0, 19500.0, 20000.0, 20500.0, 21000.0, 21500.0, 22000.0, 22500.0, 23000.0,
23500.0, 24000.0, 24500.0, 25000.0, 25400.0, 30000.0, 31000.0, 32000.0, 33000.0,
34000.0, 35000.0, 36000.0, 37000.0, 38000.0, 39000.0, 40000.0, 41000.0, 42000.0,
43000.0, 44000.0, 45000.0, 46000.0, 47000.0, 48000.0, 49000.0, 50000.0, 83781.1,
85556.2, 86834.3, 88680.5, 90526.6, 91946.7, 94005.9, 95852.1, 97272.2, 98976.3,
100254.4, 102200.0, 102300.0, 106432.0 ]
v = [ 0.57, 0.57, 0.60, 0.53, 0.43, 0.42, 0.44, 0.65, 0.72,
0.65, 0.64, 0.65, 0.58, 0.62, 0.63, 0.65, 0.67, 0.70,
0.68, 0.64, 0.64, 0.60, 0.53, 0.55, 0.54, 0.51, 0.51,
0.53, 0.49, 0.49, 0.46, 0.47, 0.60, 0.60, 0.60, 0.60,
0.60, 0.60, 0.60, 0.60, 0.60, 0.60, 0.60, 0.59, 0.58,
0.57, 0.56, 0.55, 0.54, 0.53, 0.52, 0.51, 0.50, 0.3297,
0.3065, 0.3020, 0.2856, 0.2689, 0.2670, 0.2734, 0.2727, 0.2521, 0.2403,
0.2336, 0.2322, 0.14, 0.14 ]
f = []
n = []
for i in xrange(0, len(c)):
n.append(1)
f.append(False)
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'channelWidth': 1.0 }
def fillAtmosphereTemplate(templateOpacity, templateTemperature, t, p, h):
# Calculate the opacity and atmospheric temperature at the zenith for each frequency
# in the template.
atmos = refract.calcOpacity(templateOpacity['centreFrequency'] * 1e6, math.radians(90.0), t, p, h)
templateOpacity['value'] = np.array(atmos['tau'])
templateTemperature['value'] = np.array(atmos['Tb'])
def plotTemplate(t, e, outname):
plt.clf()
plt.plot(t['centreFrequency'], t['value'])
plt.plot(e['centreFrequency'], (e['value']), "green")
plt.savefig(outname)
def plotSpectrum(template, conditions, outname):
# Plot the template spectrum we are passed with frequency on the x-axis.
# Initialise the plot.
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111)
# These are the colours we can use for the different lines.
colours = [ "blue", "green", "red", "black", "yellow" ]
# Go through the conditions (usually weather conditions) and plot a line for each.
for i, c in enumerate(conditions):
ax.plot(template[c]['centreFrequency'], template[c]['value'], colours[i], label=c)
# Set the x-axis limits to be tight on the actual frequency range.
plt.xlim(template[c]['centreFrequency'][0], template[c]['centreFrequency'][-1])
plt.xlabel("Frequency [MHz]")
plt.ylabel("RMS noise level [mJy/beam]")
# Ensure that the x- and y-axes don't have an offset value.
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
# Put the legend with the condition names at the top of the plot outside the border.
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=len(conditions), mode="expand", borderaxespad=0.)
# Highlight the regions that are flagged in the template.
for i in xrange(0, len(template[conditions[0]]['flags'])):
if (template[conditions[0]]['flags'][i] == True):
x1 = template[conditions[0]]['centreFrequency'][i] - template[c]['channelWidth'] / 2.0
x2 = template[conditions[0]]['centreFrequency'][i] + template[c]['channelWidth'] / 2.0
plt.axvspan(x1, x2, alpha=0.2, edgecolor='none', facecolor='red')
plt.savefig(outname)
def flagTemplate(t, flagType, corrMode, edgeChan):
# Set the flags in the template.
if ((flagType == "continuum") or (flagType == "birdies")):
# This is channel based flagging.
flagSrc = channelFlagging[flagType]
if (corrMode in flagSrc):
flagSrc = flagSrc[corrMode]
for c in xrange(0, len(flagSrc)):
for i in xrange(0, len(t['flags'])):
if (t['channelNumber'][i] == flagSrc[c]):
t['flags'][i] = True
break
elif (flagType == "rfi"):
# This is frequency based flagging.
oldi = 0
flagSrc = frequencyFlagging[flagType]
for r in xrange(0, len(flagSrc)):
for i in xrange(oldi, len(t['flags'])):
f1 = t['centreFrequency'][i] - t['channelWidth'] / 2.0
f2 = t['centreFrequency'][i] + t['channelWidth'] / 2.0
if (f1 > flagSrc[r][1]):
break
t['flaggedBandwidth'][i] += frequencyOverlap(f1, f2, flagSrc[r][0], flagSrc[r][1])
oldi = i
# Check for which channels are above the percentage flagged cut.
for i in xrange(0, len(t['flags'])):
p = t['flaggedBandwidth'][i] / t['channelWidth']
if (p > 0.5):
t['flags'][i] = True
elif (flagType == "edge"):
# Flag edge channels.
for i in xrange(0, len(t['channelNumber'])):
cn = i + 1
rcn = len(t['channelNumber']) - i
if ((cn <= edgeChan) or
(rcn <= edgeChan)):
t['flags'][i] = True
def calculateSensitivity(rmsTemplate, nAnts):
# Given a template filled with the RMS noise in each channel in the continuum
# band, return the average sensitivity of the continuum band channels (the spectral
# RMS), and the spectral RMS divided by the bandwidth (the continuum RMS). This
# routine does this while respecting any flagging present in the template.
rmsTotal = 0.0
rmsZoomTotal = 0.0
rmsN = 0
rmsZoomN = 0
for i in xrange(0, len(rmsTemplate['value'])):
# We count flagged continuum channels in the zoom RMS calculation.
rmsZoomTotal += rmsTemplate['value'][i]
rmsZoomN += 1
if (rmsTemplate['flags'][i] == False):
rmsTotal += rmsTemplate['value'][i]
rmsN += 1
if (rmsN > 0):
rmsSpectral = rmsTotal / rmsN
totalBandwidth = rmsN * rmsTemplate['channelWidth']
rmsContinuum = rmsSpectral / math.sqrt(float(rmsN))
rmsZoom = rmsSpectral * math.sqrt(float(nZoomChannels))
else:
rmsSpectral = None
totalBandwidth = None
rmsContinuum = None
rmsZoom = None
# Calculate the SEFDs for each antenna and the array as a whole.
# We use TMS equation 1.6 for this and convert to Jy.
Aone = surfaceArea(antennaDiameter)
Aall = nAnts * Aone
sefdOne = 2.0 * boltzmann * rmsTemplate['systemp'] / (Aone * 1e-26)
sefdAll = 2.0 * boltzmann * rmsTemplate['systemp'] / (Aall * 1e-26)
return { 'rms': { 'spectral': rmsSpectral, 'continuum': rmsContinuum, 'zoom': rmsZoom },
'bandwidth': { 'unflagged': totalBandwidth }, 'sefd': { 'antenna': sefdOne,
'array': sefdAll } }
def calculateRms(tsys, efficiency, opacity, temperature, minHa, maxHa, perHa, nAntenna,
totalTime, weighting, sind, cosd):
# Given the tsys and efficiency templates, the number of antennas involved in the
# imaging, the total integration time and the image weighting scheme, this routine
# will return another template with each channel being the RMS noise expected in
# that channel. This comes from eqn 6.62 of TMS, where eta_Q is 1 (for CABB's
# digitisation) but A is multiplied by our efficiency factor. That equation is
# for only a single polarisation though, so for an unpolarised source, the noise
# level is sqrt(2) lower, which is where the sqrt(2) factor in the numerator comes from
# instead of the 2.
# The variables for our output template.
c = []
v = []
n = []
f = []
# Figure out how much time is spent in each integration period.
haRange = maxHa - minHa
nIntegrations = math.ceil(haRange * perHa)
intTime = totalTime / nIntegrations
Texcess = []
systemTemperature = []
for j in xrange(0, int(nIntegrations + 1)):
# The hour angle at this integration.
jHa = minHa + float(j) / perHa
# The elevation at this hour angle.
cosha = math.cos(math.radians(jHa * 15.0))
sinel = sinl * sind + cosl * cosd * cosha
# Calculate the excess temperature due to the atmosphere
# and CMB.
elFactor = np.exp(-1.0 * opacity['value'] / sinel)
cbFactor = 2.7 * elFactor
ivFactor = 1.0 - elFactor
atFactor = temperature['value'] * ivFactor
Texcess.append(atFactor + cbFactor)
for i in xrange(0, len(tsys['centreFrequency'])):
# Check that the frequencies are the same in both templates.
if (tsys['centreFrequency'][i] == efficiency['centreFrequency'][i]):
# All's good.
c.append(tsys['centreFrequency'][i])
# Get the average excess temperature now.
exTemp = [ row[i] for row in Texcess ]
excessTemp = np.sum(exTemp) / float(len(exTemp))
Tmeas = tsys['value'][i] + excessTemp
TmeasEff = Tmeas / efficiency['value'][i]
# The units of this is actually mJy since we keep the frequency
# in MHz rather than converting to Hz (convenient isn't it!).
v.append((math.sqrt(2.0) * boltzmann * Tmeas * weighting['avg']) /
(1e-26 * surfaceArea(antennaDiameter) *
efficiency['value'][i] *
math.sqrt(float(nAntenna) * float(nAntenna - 1) *
tsys['channelWidth'] * (totalTime * 60.0))))
n.append(1)
if (tsys['flags'][i] or efficiency['flags'][i]):
f.append(True)
else:
f.append(False)
systemTemperature.append(TmeasEff)
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'channelWidth': tsys['channelWidth'], 'channelNumber': tsys['channelNumber'],
'systemp': np.mean(systemTemperature),
'systemTemperature': float("%.1f" % np.mean(systemTemperature))}
def surfaceArea(d):
# Given the diameter of a dish (m), return its surface area (m^2).
return (math.pi * ((d / 2.0) ** 2))
def weightingFactor(weighting, array, ant6):
# Return the w_rms / w_mean weighting factors given the weighting scheme,
# the array configuration and whether antenna 6 is included in the imaging.
# The weighting factors we determined from simulations.
factors = [
{ 'array': [ '6000', '6km', '3000', '3km' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.039, 'min': 1.000, 'max': 1.079, 'beam': 1.32 },
'R1': { 'avg': 1.040, 'min': 1.000, 'max': 1.080, 'beam': 1.32 },
'R0': { 'avg': 1.871, 'min': 1.350, 'max': 2.781, 'beam': 0.84 },
'R-1': { 'avg': 5.791, 'min': 3.685, 'max': 10.987, 'beam': 0.80 },
'R-2': { 'avg': 5.847, 'min': 3.688, 'max': 11.240, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.97 },
'R1': { 'avg': 1.002, 'min': 1.001, 'max': 1.004, 'beam': 0.89 },
'R0': { 'avg': 1.882, 'min': 1.703, 'max': 1.943, 'beam': 0.66 },
'R-1': { 'avg': 3.875, 'min': 2.543, 'max': 7.102, 'beam': 0.64 },
'R-2': { 'avg': 3.908, 'min': 2.562, 'max': 7.222, 'beam': 0.64 } } ] },
{ 'array': [ '1500', '1.5km' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.001, 'beam': 1.32 },
'R0': { 'avg': 1.507, 'min': 1.181, 'max': 1.846, 'beam': 0.84 },
'R-1': { 'avg': 7.925, 'min': 5.200, 'max': 16.732, 'beam': 0.80 },
'R-2': { 'avg': 8.151, 'min': 5.163, 'max': 19.304, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.96 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.003, 'beam': 0.88 },
'R0': { 'avg': 1.854, 'min': 1.576, 'max': 1.953, 'beam': 0.64 },
'R-1': { 'avg': 3.900, 'min': 2.524, 'max': 8.218, 'beam': 0.62 },
'R-2': { 'avg': 3.923, 'min': 2.506, 'max': 8.707, 'beam': 0.62 } } ] },
{ 'array': [ '750', '750m' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R0': { 'avg': 1.299, 'min': 1.143, 'max': 1.621, 'beam': 0.84 },
'R-1': { 'avg': 12.893, 'min': 8.581, 'max': 17.674, 'beam': 0.80 },
'R-2': { 'avg': 14.027, 'min': 8.882, 'max': 22.273, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.96 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.002, 'beam': 0.88 },
'R0': { 'avg': 1.925, 'min': 1.850, 'max': 1.971, 'beam': 0.62 },
'R-1': { 'avg': 3.557, 'min': 2.578, 'max': 5.255, 'beam': 0.59 },
'R-2': { 'avg': 3.582, 'min': 2.583, 'max': 5.369, 'beam': 0.59 } } ] },
{ 'array': [ '367', 'EW352', 'EW367', 'EW352/367' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R0': { 'avg': 1.077, 'min': 1.029, 'max': 1.157, 'beam': 0.84 },
'R-1': { 'avg': 18.304, 'min': 16.432, 'max': 17.498, 'beam': 0.80 },
'R-2': { 'avg': 31.295, 'min': 20.574, 'max': 52.204, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.09 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.001, 'beam': 1.00 },
'R0': { 'avg': 1.917, 'min': 1.838, 'max': 1.965, 'beam': 0.68 },
'R-1': { 'avg': 3.271, 'min': 2.537, 'max': 4.639, 'beam': 0.64 },
'R-2': { 'avg': 3.298, 'min': 2.550, 'max': 4.718, 'beam': 0.64 } } ] },
{ 'array': [ 'h214', 'H214', 'h168', 'H168', 'h75', 'H75' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32},
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32},
'R0': { 'avg': 1.106, 'min': 1.023, 'max': 1.186, 'beam': 0.84},
'R-1': { 'avg': 16.865, 'min': 15.629, 'max': 18.294, 'beam': 0.80 },
'R-2': { 'avg': 26.926, 'min': 18.717, 'max': 58.094, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.75 },
'R1': { 'avg': 1.001, 'min': 1.001, 'max': 1.002, 'beam': 0.74 },
'R0': { 'avg': 1.641, 'min': 1.529, 'max': 1.760, 'beam': 0.61 },
'R-1': { 'avg': 1.984, 'min': 1.753, 'max': 2.281, 'beam': 0.59 },
'R-2': { 'avg': 1.988, 'min': 1.755, 'max': 2.288, 'beam': 0.59 } } ] } ]
# Search through the array.
for f in xrange(0, len(factors)):
arrayFound = False
for i in xrange(0, len(factors[f]['array'])):
if (array == factors[f]['array'][i]):
arrayFound = True
break
if (arrayFound):
g = 1
if (ant6):
g = 0
if (weighting in factors[f]['factors'][g]):
return (factors[f]['factors'][g][weighting])
# We couldn't find the appropriate weighting factors.
raise CalcError("Weighting factors are not available.")
def maximumBaseline(array):
# Return the maximum baseline length for a named array, both on the track
# only, and including CA06.
stationLocations = {
'W0': [ -4752438.459, 2790321.299, -3200483.747 ],
'W2': [ -4752422.922, 2790347.675, -3200483.747 ],
'W4': [ -4752407.385, 2790374.052, -3200483.747 ],
'W6': [ -4752391.848, 2790400.428, -3200483.747 ],
'W8': [ -4752376.311, 2790426.804, -3200483.747 ],
'W10': [ -4752360.774, 2790453.181, -3200483.747 ],
'W12': [ -4752345.237, 2790479.557, -3200483.747 ],
'W14': [ -4752329.700, 2790505.934, -3200483.747 ],
'W16': [ -4752314.163, 2790532.310, -3200483.747 ],
'W32': [ -4752189.868, 2790743.321, -3200483.747 ],
'W45': [ -4752088.877, 2790914.767, -3200483.747 ],
'W64': [ -4751941.276, 2791165.342, -3200483.747 ],
'W84': [ -4751785.907, 2791429.106, -3200483.747 ],
'W98': [ -4751677.148, 2791613.741, -3200483.747 ],
'W100': [ -4751661.611, 2791640.117, -3200483.747 ],
'W102': [ -4751646.074, 2791666.493, -3200483.747 ],
'W104': [ -4751630.537, 2791692.870, -3200483.747 ],
'W106': [ -4751615.000, 2791719.246, -3200483.747 ],
'W109': [ -4751591.695, 2791758.810, -3200483.747 ],
'W110': [ -4751583.926, 2791771.999, -3200483.747 ],
'W111': [ -4751576.158, 2791785.187, -3200483.747 ],
'W112': [ -4751568.389, 2791798.375, -3200483.747 ],
'W113': [ -4751560.621, 2791811.563, -3200483.747 ],
'W124': [ -4751475.168, 2791956.633, -3200483.747 ],
'W125': [ -4751467.399, 2791969.821, -3200483.747 ],
'W128': [ -4751444.094, 2792009.386, -3200483.747 ],
'W129': [ -4751436.325, 2792022.574, -3200483.747 ],
'W140': [ -4751350.872, 2792167.644, -3200483.747 ],
'W147': [ -4751296.492, 2792259.961, -3200483.747 ],
'W148': [ -4751288.724, 2792273.149, -3200483.747 ],
'W163': [ -4751172.197, 2792470.972, -3200483.747 ],
'W168': [ -4751133.354, 2792536.913, -3200483.747 ],
'W172': [ -4751102.281, 2792589.666, -3200483.747 ],
'W173': [ -4751094.512, 2792602.854, -3200483.747 ],
'W182': [ -4751024.596, 2792721.547, -3200483.747 ],
'W189': [ -4750970.216, 2792813.865, -3200483.747 ],
'W190': [ -4750962.448, 2792827.053, -3200483.747 ],
'W195': [ -4750923.605, 2792892.994, -3200483.747 ],
'W196': [ -4750915.837, 2792906.182, -3200483.747 ],
'W392': [ -4749393.198, 2795491.050, -3200483.694 ],
'N2': [ -4751628.291, 2791727.075, -3200457.305 ],
'N5': [ -4751648.226, 2791738.818, -3200417.642 ],
'N7': [ -4751661.517, 2791746.647, -3200391.200 ],
'N11': [ -4751688.098, 2791762.304, -3200338.316 ],
'N14': [ -4751708.034, 2791774.047, -3200298.653 ] }
endStations = [
{ 'array': [ '6000', '6km', '3000', '3km' ],
'stations': [ 'W2', 'W196' ] },
{ 'array': [ '1500', '1.5km' ],
'stations': [ 'W98', 'W195' ] },
{ 'array': [ '750', '750m' ],
'stations': [ 'W98', 'W148' ] },
{ 'array': [ '367', 'EW367', 'EW352/367' ],
'stations': [ 'W104', 'W128' ] },
{ 'array': [ 'EW352' ],
'stations': [ 'W102', 'W125' ] },
{ 'array': [ 'h214', 'H214' ],
'stations': [ 'W98', 'W113', 'W104', 'N14' ] },
{ 'array': [ 'h168', 'H168' ],
'stations': [ 'W100', 'W111', 'W104', 'N11' ] },
{ 'array': [ 'h75', 'H75' ],
'stations': [ 'W104', 'W109', 'W104', 'N5' ] } ]
# Search through the array.
for s in xrange(0, len(endStations)):
if (array in endStations[s]['array']):
# Calculate the maximum dX, dY, dZ.
mdX = 0
mdY = 0
mdZ = 0
for i in xrange(0, len(endStations[s]['stations']) - 1):
for j in xrange(i + 1, len(endStations[s]['stations'])):
dYp = abs(stationLocations[endStations[s]['stations'][i]][0] -
stationLocations[endStations[s]['stations'][j]][0])
dXp = abs(stationLocations[endStations[s]['stations'][i]][1] -
stationLocations[endStations[s]['stations'][j]][1])
dX = dXp * math.cos(cangle) - dYp * math.sin(cangle)
dY = dXp * math.sin(cangle) + dYp * math.cos(cangle)
if (dX > mdX):
mdX = dX
if (dY > mdY):
mdY = dY
dZ = abs(stationLocations[endStations[s]['stations'][i]][2] -
stationLocations[endStations[s]['stations'][j]][2])
if (dZ > mdZ):
mdZ = dZ
mdX6 = 0
mdY6 = 0
mdZ6 = 0
for i in xrange(0, len(endStations[s]['stations'])):
dYp = abs(stationLocations[endStations[s]['stations'][i]][0] -
stationLocations['W392'][0])
dXp = abs(stationLocations[endStations[s]['stations'][i]][1] -
stationLocations['W392'][1])
dX = dXp * math.cos(cangle) - dYp * math.sin(cangle)
dY = dXp * math.sin(cangle) + dYp * math.cos(cangle)
if (dX > mdX6):
mdX6 = dX
if (dY > mdY6):
mdY6 = dY
dZ = abs(stationLocations[endStations[s]['stations'][i]][2] -
stationLocations['W392'][2])
if (dZ > mdZ6):
mdZ6 = dZ
return ( { 'track': { 'dX': mdX, 'dY': mdY, 'dZ': mdZ },
'ca06': { 'dX': mdX6, 'dY': mdY6, 'dZ': mdZ6 } } )
# We couldn't find the appropriate maximum baseline lengths.
raise CalcError("Baseline lengths are not available.")
def addToOutput(output, item, name, value, description, units):
# Add some information to the JSON output object.
if (isinstance(name, list) == False):
output[item][name] = value
output['description'][name] = description
if (units is not None):
output['units'][name] = units
else:
if (name[0] not in output[item]):
output[item][name[0]] = {}
output['description'][name[0]] = description
if (units is not None):
output['units'][name[0]] = units
output[item][name[0]][name[1]] = value
def bandwidthToVelocity(lfreq, bw, restfreq):
# Given a rest frequency in MHz, calculate the velocity span represented
# by the specified bandwidth (MHz) starting at some low frequency (MHz).
vr = bw * restfreq * speedoflight * mToKm / (lfreq * (lfreq + bw))
vro = float("%.3f" % vr)
return (vro)
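# Illustrative sketch of bandwidthToVelocity (never called here): the velocity span
# follows dv = c * bw * restfreq / (lfreq * (lfreq + bw)), converted to km/s. The
# 42000 MHz band start and 43122 MHz rest frequency below are assumed values.
def _exampleBandwidthToVelocity():
    return bandwidthToVelocity(42000.0, continuumBandwidth, 43122.0)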
| gpl-3.0 |
troycomi/microMS | GUICanvases/microMSModel.py | 1 | 34074 |
from PIL import ImageDraw, ImageFont
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
import os
import random
from scipy.spatial.distance import pdist
import numpy as np
from copy import deepcopy, copy
from GUICanvases import GUIConstants
from ImageUtilities import slideWrapper
from ImageUtilities import blobFinder
from ImageUtilities import blob
from ImageUtilities import TSPutil
from ImageUtilities.enumModule import Direction, StepSize
from ImageUtilities import blobList
from CoordinateMappers import supportedCoordSystems
class MicroMSModel(object):
'''
The model of a microMS experiment consisting of a slide, blob finder, and blobs
Performs several vital functions for interacting with each object and maintains a list of blobs
'''
def __init__(self, GUI):
'''
Initialize a new model setup. Slide starts as None.
The coordinateMapper is set as the first mapper of the supported mappers.
Also calls self.resetVariables to clear other instance variables.
GUI: the supporting GUI
'''
self.slide = None
self.coordinateMapper = supportedCoordSystems.supportedMappers[0]
self.GUI = GUI
self.resetVariables()
def setupMicroMS(self, filename):
'''
Loads an image and sets up a new session
filename: the image to load
'''
self.slide = slideWrapper.SlideWrapper(filename)
self.resetVariables()
def resetVariables(self):
'''
Clears and initializes all instance variables
'''
self.blobCollection = [blobList.blobList(self.slide) for i in range(10)]
self.setCurrentBlobs(0)
self.tempBlobs = None
self.histogramBlobs = None
self.histColors = None
self.coordinateMapper.clearPoints()
self.mirrorImage = False
self.showPatches = True
self.drawAllBlobs = False
self.showPrediction = False
self.showThreshold = False
def setCoordinateMapper(self, newMapper):
'''
Sets a new coordinate mapper and clears its points
newMapper: the new instance of coordinateMapper to use
'''
self.coordinateMapper = newMapper
self.coordinateMapper.clearPoints()
def saveEntirePlot(self, fileName, ROI = None):
'''
saves the entire slide image at the current zoom level
fileName: the file to write to
*NOTE: this can take a while to run and generate large files at max zoom
'''
#save the current size and position
size, pos = self.slide.size, self.slide.pos
if ROI is None:
#match size to whole slide, position at center
self.slide.size, self.slide.pos = \
(self.slide.dimensions[0]//2**self.slide.lvl,
self.slide.dimensions[1]//2**self.slide.lvl), \
(self.slide.dimensions[0]//2, self.slide.dimensions[1]//2)
else:
#limit to just the ROI bounds
minx = maxx = ROI[0][0]
miny = maxy = ROI[0][1]
for r in ROI:
minx = minx if minx < r[0] else r[0]
maxx = maxx if maxx > r[0] else r[0]
miny = miny if miny < r[1] else r[1]
maxy = maxy if maxy > r[1] else r[1]
self.slide.size = ((maxx-minx) // 2 **self.slide.lvl, (maxy-miny) //2**self.slide.lvl)
self.slide.pos = ((maxx-minx) // 2 + minx, (maxy-miny) //2 + miny)
self.slide.size = [int(i) for i in self.slide.size]
self.slide.pos = [int(i) for i in self.slide.pos]
#get whole image
wholeImg = self.slide.getImg()
draw = ImageDraw.Draw(wholeImg)
#markup image
linWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
tfont = ImageFont.truetype("arial.ttf",linWid+6)
#for each blob list
for ii in range(len(self.blobCollection)):
if self.blobCollection[ii].length() > 0:
drawnlbls = set()
drawlbl = self.blobCollection[ii].blobs[0].group is not None
#for each blob
for i,gb in enumerate(self.blobCollection[ii].blobs):
p = self.slide.getLocalPoint((gb.X,gb.Y))
rad = gb.radius/2**self.slide.lvl
#draw blob outline
draw.ellipse((p[0]-rad, p[1]-rad, p[0]+rad, p[1]+rad), outline=GUIConstants.MULTI_BLOB[ii])
#draw label if group exists
if drawlbl and gb.group not in drawnlbls:
draw.text((p[0]+10/2**self.slide.lvl,p[1]-10/2**self.slide.lvl),
str(gb.group),
font=tfont, fill=GUIConstants.EXPANDED_TEXT)
drawnlbls.add(gb.group)
#roi
if ROI is not None:
ROI = [self.slide.getLocalPoint(r) for r in ROI]
ROI.append(ROI[0])
draw.line([(x[0], x[1]) for x in ROI], fill = GUIConstants.ROI)
#save image
wholeImg.save(fileName)
#restore size and position
self.slide.size, self.slide.pos = size, pos
def saveCurrentBlobFinding(self, filename):
'''
Save the current blob finder and currently selected blob list
filename: file to save to
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
#current list is empty
if self.blobCollection[self.currentBlobs].length() == 0:
return "List {} contains no blobs!".format(self.currentBlobs +1) #plus one for GUI display
#save blobs
self.blobCollection[self.currentBlobs].saveBlobs(filename)
return "Saved blob information of list {}".format(self.currentBlobs+1)
def saveHistogramBlobs(self, filename):
'''
Save up to 3 files for different histogram filters
filename: the filename to save
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
#no histogram blobs to save
if self.histogramBlobs is None or len(self.histogramBlobs) == 0:
return "No histogram divisions provided"
#save different divisions
f, ex = os.path.splitext(filename)
for blbs in self.histogramBlobs:
if blbs.length() > 0:
blbs.saveBlobs('{}_{}_{}{}'.format(f, blbs.description, blbs.threshCutoff, ex))
return "Saved histogram divisions with base name {}".format(os.path.split(f)[1])
def saveAllBlobs(self, filename):
'''
Save each list of blobs in it's own list
filename: a full filename with extension. The list number will be added as such:
dir/test.txt -> dir/test_1.txt
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
f, ex = os.path.splitext(filename)
#save each blob list
for i, blbs in enumerate(self.blobCollection):
if blbs.length() > 0:
blbs.saveBlobs('{}_{}{}'.format(f, i, ex))
return "Saved blobs with base name '{}'".format(os.path.split(f)[1])
def saveCoordinateMapper(self, filename):
'''
Save the current coordinate mapper
filename: file to save to
'''
#no fiducials trained
if len(self.coordinateMapper.pixelPoints) < 1:
return "No coordinates to save"
self.coordinateMapper.saveRegistration(filename)
return "Saved coordinate mapper"
def saveInstrumentPositions(self, filename, tspOpt, maxPoints = None):
'''
save positions of blobs in instrument coordinate system
fileName: file to save to
tspOpt: bool indicating whether or not to perform traveling salesman optimization
maxPoints: maximum number of blobs to save. Default (None) saves all
'''
#check if the file can be saved
if len(self.coordinateMapper.physPoints) < 2:
return "Not enough training points to save instrument file"
if self.blobCollection[self.currentBlobs].length() == 0:
return "No blobs to save"
#get current blob list
blobs = self.blobCollection[self.currentBlobs].blobs
#if maxPoints is valid
if maxPoints is not None and maxPoints > 0 and maxPoints < self.currentBlobLength():
#obtain a random sample of blobs
blobs = random.sample(blobs,maxPoints)
#if tspOpt is requested
if tspOpt == True:
#reorder visit order
soln = TSPutil.TSPRoute(blob.blob.getXYList(blobs))
blobs = [blobs[i] for i in soln]
#save list of blobs
self.coordinateMapper.saveInstrumentFile(filename,
blobs)
return "Saved instrument file of list {}".format(self.currentBlobs +1 )
def saveInstrumentRegistrationPositions(self, filename):
'''
Save fiducial locations in the instrument coordinate system
'''
if len(self.coordinateMapper.physPoints) < 2:
return "Not enough training points to save fiducial locations"
self.coordinateMapper.saveInstrumentRegFile(filename)
return "Saved instrument registration positions"
def loadCoordinateMapper(self,filename):
'''
load a prior registration file
changes the current mapper to the one specified in the file
filename: file to load
returns a status string to display, and the index of the new mapper
'''
#get old index
old = supportedCoordSystems.supportedMappers.index(self.coordinateMapper)
#get first line in file
reader = open(filename,'r')
line = reader.readline().strip()
reader.close()
#see if that is a name of a coordinatemapper
try:
i = supportedCoordSystems.supportedNames.index(line)
except:
return 'Unsupported instrument: {}'.format(line), old
#See if mapper has changed to warn the user
result = 'Loaded {} registration'.format(line)
if i != old:
result = 'Warning, changing instrument to {}'.format(line)
self.coordinateMapper = supportedCoordSystems.supportedMappers[i]
self.coordinateMapper.loadRegistration(filename)
return result, i
def loadBlobFinding(self, filename):
'''
        Loads the blobs from the provided file into the current blob list and sets the blob finder to the previously saved values
filename: file to load
'''
self.blobCollection[self.currentBlobs].loadBlobs(filename)
return "Finished loading blob positions into list {}".format(self.currentBlobs+1)
def loadInstrumentPositions(self, filename):
'''
        Load an instrument position file into the current blob list.
Will not have proper radius, but should retain the groups.
filename: file to load
'''
self.blobCollection[self.currentBlobs].blobs = \
self.coordinateMapper.loadInstrumentFile(filename)
self.blobCollection[self.currentBlobs].generateGroupLabels()
return "Finished loading instrument file into list {}".format(self.currentBlobs+1)
def currentBlobLength(self):
'''
Gets the length of the current blob list
'''
return self.blobCollection[self.currentBlobs].length()
def currentInstrumentExtension(self):
'''
Gets the instrument extension of the current coordinate mapper
'''
return self.coordinateMapper.instrumentExtension
def runGlobalBlobFind(self):
'''
Performs global blob finding on the current slide and sets to current blob list
'''
if self.slide is None:
return "No slide was open"
return self.blobCollection[self.currentBlobs].blobSlide() + " in list {}".format(self.currentBlobs+1)
def updateCurrentBlobs(self, newBlobs):
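        '''
        Places newBlobs into the first empty blob list and makes that list the current one
        newBlobs: a blobList instance
        '''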
if not isinstance(newBlobs, blobList.blobList):
raise ValueError('New blobs must be a blobList')
#find first unused blob index
for i in range(len(self.blobCollection)):
if self.blobCollection[i].length() == 0:
#add new blobs
self.blobCollection[i] = newBlobs
self.setCurrentBlobs(i)
return
def distanceFilter(self, distance):
'''
filters the global blob list to remove blobs which are closer than 'distance' pixels
        the prior list remains stored at the previously current index
distance: distance threshold
'''
if self.currentBlobLength() == 0:
return "No blobs to filter"
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].distanceFilter(distance, verbose = True))
return "Finished distance filter in list {}".format(self.currentBlobs+1)
def roiFilter(self):
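        '''
        Filters the current blob list with the drawn ROI and reports how many blobs were removed
        '''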
if self.currentBlobLength() == 0:
return "No blobs to filter"
if len(self.blobCollection[self.currentBlobs].ROI) < 3:
return "No ROI selected"
startLen = self.currentBlobLength()
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].roiFilter())
endLen = self.currentBlobLength()
return "{} blobs removed, {} remain in list {}".format(startLen - endLen, endLen, self.currentBlobs+1)
def roiFilterInverse(self):
if self.currentBlobLength() == 0:
return "No blobs to filter"
if len(self.blobCollection[self.currentBlobs].ROI) < 3:
return "No ROI selected"
startLen = self.currentBlobLength()
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].roiFilterInverse())
endLen = self.currentBlobLength()
return "{} blobs removed, {} remain in list {}".format(startLen - endLen, endLen, self.currentBlobs+1)
def hexPackBlobs(self, separation, layers, dynamicLayering = False):
'''
expands each blob into hexagonally closest packed positions
        separation: minimum separation between points
layers: number of layers to generate
dynamicLayering: adjust the number of layers with the blob radius
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.hexagonallyClosePackPoints(separation, layers, dynamicLayering = dynamicLayering))
def rectPackBlobs(self, separation, layers, dynamicLayering = False):
'''
expands each blob into rectangularly packed positions
        separation: minimum separation between points
layers: number of layers to generate
dynamicLayering: adjust the number of layers with the blob radius
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.rectangularlyPackPoints(separation, layers, dynamicLayering = dynamicLayering))
def circularPackBlobs(self, separation, maxShots, offset):
'''
expands each blob into circularly packed positions around the blob
        separation: minimum separation between spots
        maxShots: maximum number of spots to place around each blob
offset: offset from the current circumference,
offset > 0 places spots outside the current blob
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.circularPackPoints(separation, maxShots, offset, minSpots = 4))
def analyzeAll(self):
'''
if the current mapper is connected to an instrument, triggers analysis of all blobs currently found
'''
#get all pixel points and translate to motor coords
if self.currentBlobLength() == 0:
return "No targets currently selected"
if len(self.coordinateMapper.physPoints) <= 2:
return "Not enough training points"
if self.coordinateMapper.connectedInstrument is None or \
self.coordinateMapper.connectedInstrument.connected == False:
return "No connected instrument"
targets = list(map(lambda b: self.coordinateMapper.translate((b.X, b.Y)),
self.blobCollection[self.currentBlobs].blobs))
#send to connected instrument
self.coordinateMapper.connectedInstrument.collectAll(targets)
return "Finished collection"
def setBlobSubset(self, blobSubset):
'''
Sets the histogram blobs supplied by a histcanvas
        blobSubset: a tuple of two lists; the first contains blobLists, the second their display colors
'''
self.histogramBlobs = blobSubset[0]
self.histColors = blobSubset[1]
def reportSlideStep(self, direction, stepSize):
'''
Moves the slide in the specified direction, taking into account mirroring
direction: a slideWrapper.direction in the observed direction
        stepSize: enum dictating the step size
'''
if self.slide is not None:
if self.mirrorImage:
if direction == Direction.left:
self.slide.step(Direction.right, stepSize)
elif direction == Direction.right:
self.slide.step(Direction.left, stepSize)
else:
self.slide.step(direction, stepSize)
            else:
                self.slide.step(direction, stepSize)
def testBlobFind(self):
'''
Performs a test blob find on the current position
Sets the zoom level to the maximum value to match test blob finding
'''
        if self.slide is not None:
            self.slide.lvl = 0
            self.tempBlobs = self.blobCollection[self.currentBlobs].blobFinder.blobImg()
def setCurrentBlobs(self, ind):
'''
Sets the current blob index to the specified value
ind: integer value of list to show
'''
self.currentBlobs = ind
if self.GUI is not None:
self.GUI.setTitle(self.currentBlobs)
def reportSize(self, newSize):
'''
Sets the size of the slidewrapper to the specified value.
        Caps the larger dimension at 600 pixels but keeps the aspect ratio
newSize = (width, height)
'''
w,h = newSize
factor = 600/max(w,h)
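        # e.g. a 2400x1200 request gives factor = 0.25, so the stored display size becomes 600x300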
w, h = int(w*factor), int(h*factor)
self.slide.size = [w, h]
def getCurrentImage(self):
'''
gets the image to display, accounting for showing thresholds
'''
#show the threshold image produced by blobfinder helper method
if self.showThreshold:
im, num = blobFinder.blobFinder._blbThresh(self.slide.getImg(),
self.blobCollection[self.currentBlobs].blobFinder.colorChannel,
self.blobCollection[self.currentBlobs].blobFinder.threshold)
return im
#else, use current image view
else:
return self.slide.getImg()
def getPatches(self, limitDraw):
'''
Gets the patches of all blobs, registration marks and predicted points.
limitDraw: boolean toggle to limit the number of blobs to draw
'''
ptches = []
#nothing requested or nothing to show
if self.showPatches == False or self.slide is None:
return PatchCollection(ptches)
#temp blobs from blob finding test. Only drawn once and the only displayed thing
if self.tempBlobs is not None:
ptches = [plt.Circle((blb.X, blb.Y),
blb.radius,
color = GUIConstants.TEMP_BLOB_FIND,
linewidth = 1,
fill = False)
for blb in self.tempBlobs]
#reset temp blobs
self.tempBlobs = None
#return patches, if none to show match_original needs to be false
return PatchCollection(ptches, match_original=(len(ptches) != 0))
#draw predicted points from coordinate mapper
lineWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
if self.showPrediction and len(self.coordinateMapper.physPoints) >= 2:
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.predictedPoints())
ptches.extend(
[plt.Circle(p, GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
color = GUIConstants.PREDICTED_POINTS,
linewidth = lineWid,
fill = False)
for p in points]
)
#draw fiducial labels, color blended by deviation
if len(self.coordinateMapper.physPoints) > 2:
deviations = self.coordinateMapper.squareErrors()
#scale between 0 and 1
mind = min(deviations)
maxd = max(deviations)
deviations = [ (x - mind) / (maxd - mind) for x in deviations]
else:
deviations = [ 0, 0 ]
good = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL)
bad = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL_WORST)
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.pixelPoints)
for i,p in enumerate(points):
#blend color based on deviation
d = deviations[inds[i]]
col = tuple(d * x + (1-d) * y for x,y in zip(bad, good))
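                # (linear interpolation: d = 0 gives the normal fiducial color, d = 1 the 'worst' color)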
ptches.append(
plt.Circle(p, GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
color = col,
linewidth = lineWid,
fill=False)
)
#draw region of interest
ptches.extend(self.getROIPatches())
#draw histogram blobs
if self.histogramBlobs is not None and len(self.histogramBlobs) != 0:
for i, blbs in enumerate(self.histogramBlobs):
ptches.extend(blbs.getPatches(limitDraw, self.slide, self.histColors[i]))
#draw blobs
else:
#draw all blob lists with their own color
if self.drawAllBlobs == True:
for j, blobs in enumerate(self.blobCollection):
ptches.extend(blobs.getPatches(limitDraw, self.slide, GUIConstants.MULTI_BLOB[j]))
#show only the current blob list
else:
ptches.extend(self.blobCollection[self.currentBlobs].getPatches(limitDraw, self.slide,
GUIConstants.MULTI_BLOB[self.currentBlobs]))
#return list of patches as a patch collection, if none match_original must be false
return PatchCollection(ptches, match_original=(len(ptches) != 0))
def getROIPatches(self, newPoint = None, append = False):
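        '''
        Builds matplotlib path patches outlining the ROI polygon of the current blob list
        newPoint, append: forwarded to the blob list's getROI call
        '''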
ptches = []
tROI = self.blobCollection[self.currentBlobs].getROI(newPoint, GUIConstants.ROI_DIST *2**self.slide.lvl, append)
if len(tROI) > 1:
verts = []
for roi in tROI:
verts.append(self.slide.getLocalPoint(roi))
verts.append(self.slide.getLocalPoint(tROI[0]))
ptches.append(mpl.patches.PathPatch(Path(verts, None),
color = GUIConstants.ROI,
fill = False))
return ptches
def reportROI(self, point, append = False):
'''
Handles ROI additions and removals based on position
point: the point in global coordinates
'''
self.blobCollection[self.currentBlobs].ROI = \
self.blobCollection[self.currentBlobs].getROI(point,
GUIConstants.ROI_DIST *2**self.slide.lvl,
append)
def drawLabels(self, axes):
'''
draw text labels on the supplied axis. Assume the axis is displaying the
slide image and blobs of the current state of everything.
'''
if self.slide is None or self.showPatches == False:
return
#fiducial labels
lineWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
#draw fiducial labels, color blended by deviation
if len(self.coordinateMapper.physPoints) > 2:
deviations = self.coordinateMapper.squareErrors()
#scale between 0 and 1
mind = min(deviations)
maxd = max(deviations)
deviations = [ (x - mind) / (maxd - mind) for x in deviations]
else:
deviations = [ 0, 0 ]
good = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL)
bad = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL_WORST)
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.pixelPoints)
for i,p in enumerate(points):
#blend color based on deviation
d = deviations[inds[i]]
col = tuple(d * x + (1-d) * y for x,y in zip(bad, good))
axes.text(p[0] + GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
p[1] - GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
self.coordinateMapper.predictLabel(self.coordinateMapper.physPoints[inds[i]]),
fontsize = lineWid + 6,
fontweight='bold',
color = col,
bbox=dict(facecolor=GUIConstants.FIDUCIAL_LABEL_BKGRD))
#show group labels
#hist blobs have no text
if self.histogramBlobs is not None and len(self.histogramBlobs) != 0:
pass
#normal blobs can have group labels
else:
pass
#show group names of all lists
if self.drawAllBlobs == True:
for blobs in self.blobCollection:
self._drawBlobLabels(axes, blobs, lineWid)
#show only the current list
else:
self._drawBlobLabels(axes, self.blobCollection[self.currentBlobs], lineWid)
def _drawBlobLabels(self, axes, blobs, lineWid):
'''
Helper method to draw blob labels onto the provided axis
axes: matplotlib axes to draw text to
blobs: blobList with labels to draw
lineWid: the linewidth to use for drawing
'''
#get grouplabels from blobs
labels = list(blobs.groupLabels.keys())
pos = list(blobs.groupLabels.values())
if len(labels) != 0:
points, inds = self.slide.getPointsInBounds(pos)
for i,p in enumerate(points):
#add offset from normal position
axes.text(p[0]+GUIConstants.DEFAULT_RADIUS/2**self.slide.lvl,
p[1]-GUIConstants.DEFAULT_RADIUS/2**self.slide.lvl,
labels[inds[i]],
fontsize=lineWid+6,
color=GUIConstants.EXPANDED_TEXT)
def reportInfoRequest(self, localPoint):
'''
Handles a request for image/blob information at the supplied local point
localPoint: (x,y) tuple of the query point in the local coordinate space
of the slide image
returns a string description of the point
'''
#nothing to query against
if self.slide is None:
return "No slide loaded"
point = self.slide.getGlobalPoint(localPoint)
#if the histogram canvas is shown, highlight that blob's location
if self.GUI is not None and self.GUI.showHist:
#find blob if user clicked in bounds
if self.blobCollection[self.currentBlobs] is not None and \
self.blobCollection[self.currentBlobs].length() > 0:
points, inds = self.slide.getPointsInBounds(blob.blob.getXYList(self.blobCollection[self.currentBlobs].blobs))
found = False
for i,p in enumerate(points):
#see if click point is within radius
if (localPoint[0]-p[0])**2 + (localPoint[1] - p[1])**2 <= \
(self.blobCollection[self.currentBlobs].blobs[inds[i]].radius/2**self.slide.lvl)**2:
self.GUI.histCanvas.singleBlob = inds[i]
found = True
break
#if not found, set to None
if not found:
self.GUI.histCanvas.singleBlob = None
#get pixel color and alpha (discarded)
try:
r,g,b,a = self.slide.getImg().getpixel(localPoint)
except IndexError:
r,g,b = 0,0,0
#get the size and circ of an area > thresh if on blb view
if self.showThreshold:
area,circ = self.blobCollection[self.currentBlobs].blobFinder.getBlobCharacteristics(localPoint)
return "x = %d, y = %d r,g,b = %d,%d,%d\tArea = %d\tCirc = %.2f"%(point[0], point[1], r, g, b, area, circ)
#get fiducial localization error if in a fiducial
fle = self.coordinateMapper.getFLE(point, GUIConstants.FIDUCIAL_RADIUS)
if fle is not None:
return "x = %d, y = %d r,g,b = %d,%d,%d FLE: %d"%(point[0], point[1], r, g, b, fle)
#show rgb and x,y location
return "x = %d, y = %d r,g,b = %d,%d,%d"%(point[0], point[1], r, g, b)
def reportFiducialRequest(self, localPoint, removePoint, extras = None):
'''
handles a fiducial request.
localpoint: (x,y) tuple in the image coordinate system
removePoint: boolean toggle. If true, the closest fiducial is removed
extras: a debugging object to bypass GUI display. Must define text and ok
'''
#no slide to register against
if self.slide is None:
return "No slide loaded"
globalPos = self.slide.getGlobalPoint(localPoint)
#shift RMB to remove closest fiducial
if removePoint:
if len(self.coordinateMapper.physPoints) == 0:
return "No points to remove"
self.coordinateMapper.removeClosest(globalPos)
return "Removed fiducial"
#get physical location from user
else:
#mapper returns predicted location
predicted = self.coordinateMapper.predictName(globalPos)
#prompt user
if self.GUI is None and extras is None:
return "No input provided"
if extras is not None:#make this check first for debugging
text = extras.text
ok = extras.ok
elif self.GUI is not None:
text, ok = self.GUI.requestFiducialInput(predicted)
if ok:
#validate entry
if self.coordinateMapper.isValidEntry(text):
#add position to mapper
dev = self.coordinateMapper.addPoints(globalPos,
self.coordinateMapper.extractPoint(text))
if dev is None:
return "%s added at %d,%d" % (text, globalPos[0], globalPos[1])
else:
return "%s added at %d,%d (FLE: %d)" % (text, globalPos[0], globalPos[1], dev)
else:
return "Invalid entry: {}".format(text)
def reportBlobRequest(self, localPoint, radius):
'''
Tries to add the blob to the current blob list.
If overlap with current blob, remove that point
localPoint: (x,y) tuple in the image coordinate space
radius: the radius of the new blob to be added
'''
#no slide to add blobs onto
if self.slide is None:
return "No slide loaded"
globalPnt = self.slide.getGlobalPoint(localPoint)
added, removeInd = self.blobCollection[self.currentBlobs].blobRequest(globalPnt, radius)
if added == True:
if self.GUI is not None and self.GUI.showHist:
self.GUI.toggleHistWindow()
return "Adding blob at {}, {}".format(globalPnt[0], globalPnt[1])
else:
if self.GUI is not None and self.GUI.showHist:
self.GUI.histCanvas.removeBlob(removeInd)
return "Removed blob at {}, {}".format(globalPnt[0], globalPnt[1])
def requestInstrumentMove(self, localPoint):
'''
Handles requests for moving the connected instrument
localPoint: (x,y) tuple in the current image coordinate system
returns a string summarizing the effect of the action
'''
#no slide is set up
if self.slide is None:
return "No slide loaded"
#the connected instrument isn't initialized or present
if self.coordinateMapper.connectedInstrument is None or \
not self.coordinateMapper.connectedInstrument.connected:
return "Instrument not connected"
#perform actual movement
pixelPnt = self.slide.getGlobalPoint(localPoint)
if len(self.coordinateMapper.physPoints) >= 2:
motorPnt = self.coordinateMapper.translate(pixelPnt)
self.coordinateMapper.connectedInstrument.moveToPositionXY(motorPnt)
return "Moving to {:.0f}, {:.0f}".format(motorPnt[0], motorPnt[1])
#not enough registration points
else:
return "Not enough training points" | mit |
llondon6/kerr_public | kerr/mapqnms.py | 1 | 35883 |
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
'''Class for boxes in complex frequency space'''
# The routines of this class assist in the solving and classification of
# QNM solutions
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class cwbox:
# ************************************************************* #
    # This is a class to facilitate the solving of Leaver's equations, varying
    # the real and imaginary frequency components, and optimizing over the separation constants.
# ************************************************************* #
def __init__(this,
                 l,m, # QNM indices
cwr, # Center coordinate of real part
cwc, # Center coordinate of imag part
wid, # box width
hig, # box height
res = 50, # Number of gridpoints in each dimension
parent = None, # Parent of current object
                 sc = None, # optional holder for separation constant
verbose = False, # be verbose
maxn = None, # Overtones with n>maxn will be actively ignored. NOTE that by convention n>=0.
smallboxes = True, # Toggle for using small boxes for new solutions
**kwargs ):
#
from numpy import array,complex128,meshgrid,float128
#
this.verbose,this.res = verbose,res
        # Store QNM indices
this.l,this.m = l,m
# Set box params
this.width,this.height = None,None
this.setboxprops(cwr,cwc,wid,hig,res,sc=sc)
        # Initialize a list of children: if a box contains multiple solutions, then it is split according to each solution's location
this.children = [this]
# Point the object to its parent
this.parent = parent
#
this.__jf__ = []
# temp grid of separation constants
this.__scgrid__ = []
# current value of scalarized work-function
this.__lvrfmin__ = None
# Dictionary for high-level data: the data of all of this object's children is collected here
this.data = {}
this.dataformat = '{ ... (l,m,n,tail_flag) : { "jf":[...],"cw":[...],"sc":[...],"lvrfmin":[...] } ... }'
# Dictionary for low-level data: If this object is fundamental, then its data will be stored here in the same format as above
this.__data__ = {}
# QNM label: (l,m,n,t), NOTE that "t" is 0 if the QNM is not a power-law tail and 1 otherwise
this.__label__ = ()
        # Counter for the number of times map has been called on this object
this.mapcount = 0
# Default value for temporary separation constant
this.__sc__ = 4.0
# Maximum overtone label allowed. NOTE that by convention n>=0.
this.__maxn__ = maxn
#
this.__removeme__ = False
#
this.__smallboxes__ = smallboxes
#################################################################
'''************************************************************ #
Set box params & separation constant center
# ************************************************************'''
#################################################################
def setboxprops(this,cwr,cwc,wid,hig,res,sc=None,data=None,pec=None):
# import maths and other
from numpy import complex128,float128,array,linspace
import matplotlib.patches as patches
# set props for box geometry
this.center = array([cwr,cwc])
        this.__cw__ = cwr + 1j*cwc # Store cw for convenience
        # Boxes may only shrink. NOTE that this is useful as some potential solutions, or unwanted solutions, may be removed, and we want to avoid finding them again. NOTE that this would be nice to implement, but it currently breaks the root finding.
this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# if (this.width is None) or (this.height is None):
# this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# else:
# this.width,this.height = min(float128( abs(wid) ),this.width),min(this.height,float128( abs(hig) ))
this.limit = array([this.center[0]-this.width/2.0, # real min
this.center[0]+this.width/2.0, # real max
this.center[1]-this.height/2.0, # imag min
this.center[1]+this.height/2.0]) # imag max
this.wr_range = linspace( this.limit[0], this.limit[1], res )
this.wc_range = linspace( this.limit[2], this.limit[3], res )
# Set patch object for plotting. NOTE the negative sign exists here per convention
if None is pec: pec = 'k'
this.patch = patches.Rectangle( (min(this.limit[0:2]), min(-this.limit[2:4]) ), this.width, this.height, fill=False, edgecolor=pec, alpha=0.4, linestyle='dotted' )
# set holder for separation constant value
if sc is not None:
this.__sc__ = sc
# Initiate the data holder for this box. The data holder will contain lists of spin, official cw and sc values
if data is not None:
this.data=data
#################################################################
'''************************************************************ #
Map the potential solutions in this box
# ************************************************************'''
#################################################################
def map(this,jf):
# Import useful things
from kerr import localmins # finds local minima of a 2D array
from kerr.basics import alert,green,yellow,cyan,bold,magenta,blue
from numpy import array,delete,ones
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
# Add the input jf to the list of jf values. NOTE that this is not the primary recommended list for referencing jf. Please use the "data" field instead.
this.__jf__.append(jf)
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
#
if this.verbose:
if this.parent is None:
alert('\n\n# '+'--'*40+' #\n'+blue(bold('Attempting to map qnm solutions for: jf = %1.8f'%(jf)))+'\n# '+'--'*40+' #\n','map')
else:
print '\n# '+'..'*40+' #\n'+blue('jf = %1.8f, label = %s'%(jf,this.__label__))+'\n# '+'..'*40+' #'
# Map solutions using discrete grid
if this.isfundamental():
# Brute-force calculate solutions to leaver's equations
            if this.verbose: alert("Solving Leaver's Eqns over grid",'map')
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf)
# Use a local-min finder to estimate the qnm locations for the grid of work function values, x
if this.verbose: alert('Searching for local minima. Ignoring mins on boundaries.','map')
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
if this.verbose: alert('Number of local minima found: %s.'%magenta('%i'%(len(array(this.__localmin__)[0]))),'map')
# If needed, split the box into sub-boxes: Give the current box children!
            this.splitcenter() # NOTE that if there is only one local min, then no split takes place
            # So far QNM solutions have been estimates that have discretization error. Now, we wish to refine the
# solutions using optimization.
if this.verbose: alert('Refining QNM solution locations using a hybrid strategy.','map')
this.refine(jf)
else:
# Map solutions for all children
for child in [ k for k in this.children if this is not k ]:
child.map(jf)
# Collect QNM solution data for this BH spin. NOTE that only non-fundamental objects are curated
if this.verbose: alert('Collecting final QNM solution information ...','map')
this.curate(jf)
# Remove duplicate solutions
this.validatechildren()
#
if this.verbose: alert('Mapping of Kerr QNM with (l,m)=(%i,%i) within box now complete for this box.' % (this.l,this.m ) ,'map')
# Some book-keeping on the number of times this object has been mapped
this.mapcount += 1
# For the given bh spin, collect all QNM frequencies and separation constants within the current box
# NOTE that the outputs are coincident lists
def curate(this,jf):
#
from numpy import arange,array,sign
#
children = this.collectchildren()
cwlist,sclist = [ child.__cw__ for child in children ],[ child.__sc__ for child in children ]
if this.isfundamental():
cwlist.append( this.__cw__ )
sclist.append( this.__sc__ )
# sort the output lists by the imaginary part of the cw values
sbn = lambda k: abs( cwlist[k].imag ) # Sort By Overtone(N)
space = arange( len(cwlist) )
map_ = sorted( space, key=sbn )
std_cwlist = array( [ cwlist[k] for k in map_ ] )
std_sclist = array( [ sclist[k] for k in map_ ] )
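        # NOTE: larger |Im(cw)| means faster damping, so this ordering approximates ordering by overtone number n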
# ---------------------------------------------------------- #
# Separate positive, zero and negative frequency solutions
# ---------------------------------------------------------- #
# Solutions with frequencies less than this value will be considered to be power-laws
pltol = 0.01
# Frequencies
sorted_cw_pos = list( std_cwlist[ (sign(std_cwlist.real) == sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_neg = list( std_cwlist[ (sign(std_cwlist.real) ==-sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_zro = list( std_cwlist[ abs(std_cwlist.real)<=pltol ] )
# Create a dictionary between (cw,sc) and child objects
A,B = {},{}
for child in children:
A[child] = ( child.__cw__, child.__sc__ )
B[ A[child] ] = child
#
def inferlabel( cwsc ):
cw,sc = cwsc[0],cwsc[1]
ll = this.l
if abs(cw.real)<pltol :
# power-law decay
tt = 1
nn = sorted_cw_zro.index( cw )
mm = this.m
else:
tt = 0
if sign(this.m)==sign(cw.real):
# prograde
mm = this.m
nn = sorted_cw_pos.index( cw )
else:
# retrograde
mm = -1 * this.m
nn = sorted_cw_neg.index( cw )
#
return (ll,mm,nn,tt)
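        # e.g. with (l,m)=(2,2): a prograde solution is labeled (2,2,n,0), a retrograde one (2,-2,n,0),
        # and a near-zero-frequency (power-law tail) solution (2,2,n,1)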
# ---------------------------------------------------------- #
# Create a dictionary to keep track of potential solutions
# ---------------------------------------------------------- #
label = {}
for child in children:
cwsc = ( child.__cw__, child.__sc__ )
label[child] = inferlabel( cwsc )
child.__label__ = label[child]
#
this.label = label
'''
IMPORTANT: Here it is assumed that the solutions will change in a continuous manner, and that after the first mapping, no new solutions are of interest, unless a box-split occurs.
'''
# Store the high-level data product
for child in children:
L = this.label[child]
if not L in this.data:
this.data[ L ] = {}
this.data[ L ][ 'jf' ] = [jf]
this.data[ L ][ 'cw' ] = [ child.__cw__ ]
this.data[ L ][ 'sc' ] = [ child.__sc__ ]
this.data[ L ][ 'lvrfmin' ] = [ child.__lvrfmin__ ]
else:
this.data[ L ][ 'jf' ].append(jf)
this.data[ L ][ 'cw' ].append(child.__cw__)
this.data[ L ][ 'sc' ].append(child.__sc__)
this.data[ L ][ 'lvrfmin' ].append(child.__lvrfmin__)
# Store the information to this child also
child.__data__['jf'] = this.data[ L ][ 'jf' ]
child.__data__['cw'] = this.data[ L ][ 'cw' ]
child.__data__['sc'] = this.data[ L ][ 'sc' ]
child.__data__['lvrfmin'] = this.data[ L ][ 'lvrfmin' ]
# Refine the box center using fminsearch
def refine(this,jf):
# Import useful things
from numpy import complex128,array,linalg,log,exp,abs
from scipy.optimize import fmin,root,fmin_tnc,fmin_slsqp
from kerr.pttools import leaver_workfunction,scberti
from kerr.basics import alert,say,magenta,bold,green,cyan,yellow
from kerr import localmins # finds local minima of a 2D array
#
if this.isfundamental():
# use the box center for refined minimization
CW = complex128( this.center[0] + 1j*this.center[1] )
# SC = this.__sc__
SC = scberti( CW*jf, this.l, this.m )
state = [ CW.real,CW.imag, SC.real,SC.imag ]
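            # NOTE: the 4D state vector used throughout is [ Re(cw), Im(cw), Re(sc), Im(sc) ]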
#
retrycount,maxretrycount,done = -1,1,False
while done is False:
#
retrycount += 1
#
if retrycount==0:
alert(cyan('* Constructing guess using scberti-grid or extrap.'),'refine')
state = this.guess(jf,gridguess=state)
else:
alert(cyan('* Constructing guess using 4D-grid or extrap.'),'refine')
state = this.guess(jf)
# Solve leaver's equations using a hybrid strategy
cw,sc,this.__lvrfmin__,retry = this.lvrsolve(jf,state)
# If the root finder had some trouble, then mark this box with a warning (for plotting)
done = (not retry) or (retrycount>=maxretrycount)
#
if retry:
newres = 2*this.res
if this.verbose:
msg = yellow( 'The current function value is %s. Retrying root finding for %ind time with higher resolution pre-grid, and brute-force 4D.'%(this.__lvrfmin__, retrycount+2) )
alert(msg,'refine')
# say('Retrying.','refine')
# Increase the resolution of the box
this.setboxprops(this.__cw__.real,this.__cw__.imag,this.width,this.height,newres,sc=this.__sc__)
# NOTE that the commented out code below is depreciated by the use of guess() above.
# # Brute force solve again
# this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
# # Use the first local min as a guess
# this.__localmin__ = localmins(this.__x__,edge_ignore=True)
# state = this.grids2states()[0]
# if this.verbose: print X.message+' The final function value is %s'%(this.__lvrfmin__)
if this.verbose: print 'The final function value is '+green(bold('%s'%(this.__lvrfmin__)))
if this.verbose:
                print '\n\t Guess cw: %s' % CW
print '\t Optimal cw: %s' % cw
print '\t Approx sc: %s' % scberti( CW*jf, this.l, this.m )
                print '\t Guess sc: %s' % (state[2]+1j*state[3])
print '\t Optimal sc: %s\n' % sc
# Set the core properties of the new box
this.setboxprops( cw.real, cw.imag, this.width,this.height,this.res,sc=sc )
# Rescale this object's boxes based on new centers
this.parent.sensescale()
else:
#
for child in [ k for k in this.children if this is not k ]:
child.refine(jf)
# Determine if the current object has more than itself as a child
def isfundamental(this):
        return len(this.children) == 1
# ************************************************************* #
    # Determine whether to split this box into sub-boxes (i.e. children)
# and if needed, split
# ************************************************************* #
def splitcenter(this):
from numpy import array,zeros,linalg,inf,mean,amax,amin,sqrt
from kerr.basics import magenta,bold,alert,error,red,warning,yellow
mins = this.__localmin__
num_solutions = len(array(mins)[0])
if num_solutions > 1: # Split the box
# for each min
for k in range(len(mins[0])):
# construct the center location
kr = mins[1][k]; wr = this.wr_range[ kr ]
kc = mins[0][k]; wc = this.wc_range[ kc ]
sc = this.__scgrid__[kr,kc]
# Determine the resolution of the new box
res = int( max( 20, 1.5*float(this.res)/num_solutions ) )
# Create the new child. NOTE that the child's dimensions will be set below using a standard method.
child = cwbox( this.l,this.m,wr,wc,0,0, res, parent=this, sc=sc, verbose=this.verbose )
# Add the new box to the current box's child list
this.children.append( child )
# NOTE that here we set the box dimensions of all children using the relative distances between them
this.sensescale()
# Now redefine the box size to contain all children
# NOTE that this step exists only to ensure that the box always contains all of its children's centers
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc = array( [ child.center[1] for child in children ] )
width = amax(wr)-amin(wr)
height = amax(wc)-amin(wc)
cwr = mean(wr)
cwc = mean(wc)
this.setboxprops( cwr,cwc,width,height,this.res,sc=sc )
elif num_solutions == 1:
            # construct the center location
k = 0 # there should be only one local min
kr = mins[1][k]
kc = mins[0][k]
wr = this.wr_range[ kr ]
wc = this.wc_range[ kc ]
# retrieve associated separation constant
sc = this.__scgrid__[kr,kc]
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
#
if len(this.__jf__)>3:
alert('Invalid number of local minima found: %s.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
# Use the extrapolated values as a guess?
                alert(yellow('Now trying to use extrapolation, rather than grid guess, to center the current box.'),'splitcenter')
#
guess = this.guess(this.__jf__[-1],gridguess=[1.0,1.0,4.0,1.0])
wr,wc,cr,cc = guess[0],guess[1],guess[2],guess[3]
sc = cr+1j*cc
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
warning('Invalid number of local minima found: %s. This box will be removed. NOTE that this may not be what you want, and further inspection may be warranted.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
this.__removeme__ = True
# Validate children: Remove duplicates
def validatechildren(this):
#
from numpy import linalg,array
from kerr import alert,yellow,cyan,blue,magenta
tol = 1e-5
#
if not this.isfundamental():
#
children = this.collectchildren()
initial_count = len(children)
# Remove identical twins
for a,tom in enumerate( children ):
for b,tim in enumerate( children ):
if b>a:
if linalg.norm(array(tom.center)-array(tim.center)) < tol:
tim.parent.children.remove(tim)
del tim
break
# Remove overtones over the max label
if this.__maxn__ is not None:
for k,child in enumerate(this.collectchildren()):
if child.__label__[2] > this.__maxn__:
if this.verbose:
                            msg = 'Removing overtone '+yellow('%s'%list(child.__label__))+' because its label is higher than the allowed value specified.'
alert(msg,'validatechildren')
this.label.pop( child.__label__ , None)
child.parent.children.remove(child)
del child
# Remove all boxes marked for deletion
for child in this.collectchildren():
if child.__removeme__:
this.label.pop( child.__label__, None )
child.parent.children.remove( child )
del child
#
final_count = len( this.collectchildren() )
#
if this.verbose:
if final_count != initial_count:
alert( yellow('%i children have been removed, and %i remain.') % (-final_count+initial_count,final_count) ,'validatechildren')
else:
alert( 'All children have been deemed valid.', 'validatechildren' )
# Method for collecting all fundamental children
def collectchildren(this,children=None):
#
if children is None:
children = []
#
if this.isfundamental():
children.append(this)
else:
for child in [ k for k in this.children if k is not this ]:
children += child.collectchildren()
#
return children
# Method to plot solutions
def plot(this,fig=None,show=False,showlabel=False):
#
from numpy import array,amin,amax,sign
from matplotlib.pyplot import plot,xlim,ylim,xlabel,ylabel,title,figure,gca,text
from matplotlib.pyplot import show as show_
#
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc =-array( [ child.center[1] for child in children ] )
wr_min,wr_max = amin(wr),amax(wr)
wc_min,wc_max = amin(wc),amax(wc)
padscale = 0.15
padr,padc = 1.5*padscale*(wr_max-wr_min), padscale*(wc_max-wc_min)
wr_min -= padr; wr_max += padr
wc_min -= padc; wc_max += padc
#
if fig is None:
# fig = figure( figsize=12*array((wr_max-wr_min, wc_max-wc_min))/(wr_max-wr_min), dpi=200, facecolor='w', edgecolor='k' )
fig = figure( figsize=12.0*array((4.5, 3))/4.0, dpi=200, facecolor='w', edgecolor='k' )
#
xlim( [wr_min,wr_max] )
ylim( [wc_min,wc_max] )
ax = gca()
#
for child in children:
plot( child.center[0],-child.center[1], '+k', ms=10 )
ax.add_patch( child.patch )
if showlabel:
text( child.center[0]+sign(child.center[0])*child.width/2,-(child.center[1]+child.height/2),
'$(%i,%i,%i,%i)$'%(this.label[child]),
ha=('right' if sign(child.center[0])<0 else 'left' ),
fontsize=10,
alpha=0.9 )
#
xlabel(r'$\mathrm{re}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
ylabel(r'-$\mathrm{im}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
title(r'$j_f = %1.6f$'%this.__jf__[-1],fontsize=18)
#
if show: show_()
# ************************************************************* #
# Solve leaver's equations in a given box=[wr_range,wc_range]
# NOTE that the box is a list, not an array
# ************************************************************* #
def lvrgridsolve(this,jf=0,fullopt=False):
# Import maths
from numpy import linalg,complex128,ones,array
from kerr.pttools import scberti
from kerr.pttools import leaver_workfunction
from scipy.optimize import fmin,root
import sys
# Pre-allocate an array that will hold work function values
x = ones( ( this.wc_range.size,this.wr_range.size ) )
# Pre-allocate an array that will hold sep const vals
        scgrid = ones( ( this.wc_range.size,this.wr_range.size ), dtype=complex128 )
# Solve over the grid
for i,wr in enumerate( this.wr_range ):
for j,wc in enumerate( this.wc_range ):
                # Construct the complex frequency for this i and j
cw = complex128( wr+1j*wc )
# # Define the intermediate work function to be used for this iteration
# fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# # For this complex frequency, optimize over separation constant using initial guess
# SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
# SC0 = [SC0_.real,SC0_.imag]
# X = fmin( fun, SC0, disp=False, full_output=True, maxiter=1 )
# # Store work function value
# x[j][i] = X[1]
# # Store sep const vals
# scgrid[j][i] = X[0][0] + 1j*X[0][1]
if fullopt is False:
# Define the intermediate work function to be used for this iteration
fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# For this complex frequency, optimize over separation constant using initial guess
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag]
# Store work function value
x[j][i] = fun(SC0)
# Store sep const vals
scgrid[j][i] = SC0_
else:
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag,0,0]
#cfun = lambda Y: [ Y[0]+abs(Y[3]), Y[1]+abs(Y[2]) ]
fun = lambda SC:leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )
X = root( fun, SC0 )
scgrid[j][i] = X.x[0]+1j*X.x[1]
x[j][i] = linalg.norm( array(X.fun) )
if this.verbose:
sys.stdout.flush()
print '.',
if this.verbose: print 'Done.'
# return work function values AND the optimal separation constants
return x,scgrid
# Convert output of localmin to a state vector for minimization
def grids2states(this):
#
from numpy import complex128
state = []
#
for k in range( len(this.__localmin__[0]) ):
#
kr,kc = this.__localmin__[1][k], this.__localmin__[0][k]
cw = complex128( this.wr_range[kr] + 1j*this.wc_range[kc] )
sc = complex128( this.__scgrid__[kr,kc] )
#
state.append( [cw.real,cw.imag,sc.real,sc.imag] )
#
return state
# Get guess either from local min, or from extrapolation of past data
def guess(this,jf,gridguess=None):
#
from kerr.pttools import leaver_workfunction
from kerr.basics import alert,magenta,apolyfit
from kerr import localmins
from numpy import array,linalg,arange,complex128,allclose,nan
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Get a guess from the localmin
if gridguess is None:
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
guess1 = this.grids2states()[0]
else:
guess1 = gridguess
# Get a guess from extrapolation ( performed in curate() )
guess2 = [ v for v in guess1 ]
if this.mapcount > 3:
# if there are three map points, try to use polynomial fitting to determine the state at the current jf value
nn = len(this.__data__['jf'])
order = min(2,nn)
#
xx = array(this.__data__['jf'])[-4:]
#
yy = array(this.__data__['cw'])[-4:]
yr = apolyfit( xx, yy.real, order )(jf)
yc = apolyfit( yy.real, yy.imag, order )(yr)
cw = complex128( yr + 1j*yc )
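            # NOTE: Re(cw) is extrapolated in jf, while Im(cw) is modeled as a function of Re(cw)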
#
zz = array(this.__data__['sc'])[-4:]
zr = apolyfit( xx, zz.real, order )(jf)
zc = apolyfit( zz.real, zz.imag, order )(zr)
sc = complex128( zr + 1j*zc )
#
guess2 = [ cw.real, cw.imag, sc.real, sc.imag ]
# Determine the best guess
if not ( allclose(guess1,guess2) ):
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
x2 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess2 ) )
alert(magenta('The function value at guess from grid is: %s'%x1),'guess')
alert(magenta('The function value at guess from extrap is: %s'%x2),'guess')
if x2 is nan:
x2 = 100.0*x1
if x1<x2:
guess = guess1
alert(magenta('Using the guess from the grid.'),'guess')
else:
guess = guess2
alert(magenta('Using the guess from extrapolation.'),'guess')
else:
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
guess = guess1
alert(magenta('The function value at guess from grid is %s'%x1),'guess')
# Return the guess solution
return guess
# Determine whether the current box contains a complex frequency given an iterable whose first two entries are the real and imag part of the complex frequency
def contains(this,guess):
#
cwrmin = min( this.limit[:2] )
cwrmax = max( this.limit[:2] )
cwcmin = min( this.limit[2:] )
cwcmax = max( this.limit[2:] )
#
isin = True
isin = isin and ( guess[0]<cwrmax )
isin = isin and ( guess[0]>cwrmin )
isin = isin and ( guess[1]<cwcmax )
isin = isin and ( guess[1]>cwcmin )
#
return isin
# Try solving the 4D equation near a single guess value [ cw.real cw.imag sc.real sc.imag ]
def lvrsolve(this,jf,guess,tol=1e-8):
# Import Maths
from numpy import log,exp,linalg,array
from scipy.optimize import root,fmin,minimize
from kerr.pttools import leaver_workfunction
from kerr import alert,red
# Try using root
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log( 1.0 + abs(array(leaver_workfunction( jf,this.l,this.m, STATE ))) )
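        # NOTE: the residual is wrapped in log(1+|.|) above; exp(X.fun)-1.0 below inverts the transform
        # so that __lvrfmin1__ is directly comparable to the fmin-based value __lvrfmin2__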
X = root( fun, guess, tol=tol )
cw1,sc1 = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin1__ = linalg.norm(array( exp(X.fun)-1.0 ))
retry1 = ( 'not making good progress' in X.message.lower() ) or ( 'error' in X.message.lower() )
# Try using fmin
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = fmin( fun, guess, disp=False, full_output=True, ftol=tol )
cw2,sc2 = X[0][0]+1j*X[0][1], X[0][2]+1j*X[0][3]
__lvrfmin2__ = exp(X[1])
        retry2 = __lvrfmin2__ > 1e-3
# Use the solution that converged the fastest to avoid solutions that have wandered significantly from the initial guess OR use the solution with the smallest fmin
if __lvrfmin1__ < __lvrfmin2__ : # use the fmin value for convenience
cw,sc,retry = cw1,sc1,retry1
__lvrfmin__ = __lvrfmin1__
else:
cw,sc,retry = cw2,sc2,retry2
__lvrfmin__ = __lvrfmin2__
if not this.contains( [cw.real,cw.imag] ):
alert(red('Trial solution found to be outside of box. I will now try to use a bounded solver, but the performance may be suboptimal.'),'lvrsolve')
s = 2.0
cwrmin = min( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwrmax = max( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwcmin = min( this.center[1]-this.height/s, this.center[1]+this.height/s )
cwcmax = max( this.center[1]-this.height/s, this.center[1]+this.height/s )
scrmin = min( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
scrmax = max( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
sccmin = min( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
sccmax = max( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
bounds = [ (cwrmin,cwrmax), (cwcmin,cwcmax), (scrmin,scrmax), (sccmin,sccmax) ]
# Try using minimize
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = minimize( fun, guess, options={'disp':False}, tol=tol, bounds=bounds )
cw,sc = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin__ = exp(X.fun)
# Always retry if the solution is outside of the box
if not this.contains( [cw.real,cw.imag] ):
retry = True
alert(red('Retrying because the trial solution is outside of the box.'),'lvrsolve')
        # Retry if the fmin value is not small enough
if __lvrfmin__ > 1e-3:
retry = True
alert(red('Retrying because the trial fmin value is greater than 1e-3.'),'lvrsolve')
# Don't retry if fval is small
if retry and (__lvrfmin__ < 1e-4):
retry = False
            alert(red('Not retrying because the fmin value is low.'),'lvrsolve')
# Return the solution
return cw,sc,__lvrfmin__,retry
# Given a box's children, resize the boxes relative to child locations: no boxes overlap
def sensescale(this):
#
from numpy import array,inf,linalg,sqrt
from kerr import alert
#
children = this.collectchildren()
# Let my people know.
if this.verbose:
alert('Sensing the scale of the current object\'s sub-boxes.','sensescale')
# Determine the distance between this min, and its closest neighbor
scalar = sqrt(2) if (not this.__smallboxes__) else 2.0*sqrt(2.0)
for tom in children:
d = inf
for jerry in [ kid for kid in children if kid is not tom ]:
r = array(tom.center)
r_= array(jerry.center)
d_= linalg.norm(r_-r)
if d_ < d:
d = d_
# Use the smallest distance found to determine a box size
s = d/scalar
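            # with s = d/sqrt(2) a box's half-diagonal is d/2, so boxes whose centers are at least d apart do not overlap;
            # the small-box option (scalar = 2*sqrt(2)) halves the box size again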
width = s; height = s; res = int( max( 20, 1.5*float(this.res)/len(children) ) ) if (len(children)>1) else this.res
# Define the new box size for this child
tom.setboxprops( tom.center[0], tom.center[1], width, height, res )
| mit |
jagill/treeano | examples/sparsity_penalty/mnist_mlp.py | 3 | 3782 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
from treeano.sandbox.nodes import kl_sparsity_penalty as sp
fX = theano.config.floatX
# ############################### prepare data ###############################
mnist = sklearn.datasets.fetch_mldata('MNIST original')
# theano has a constant float type that it uses (float32 for GPU)
# also rescaling to [0, 1] instead of [0, 255]
X = mnist['data'].astype(fX) / 255.0
y = mnist['target'].astype("int32")
X_train, X_valid, y_train, y_valid = sklearn.cross_validation.train_test_split(
X, y, random_state=42)
in_train = {"x": X_train, "y": y_train}
in_valid = {"x": X_valid, "y": y_valid}
# ############################## prepare model ##############################
model = tn.HyperparameterNode(
"model",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(None, 28 * 28)),
tn.DenseNode("fc1"),
tn.SigmoidNode("sigmoid1"),
sp.AuxiliaryKLSparsityPenaltyNode("sp1", cost_weight=1e1),
tn.DropoutNode("do1"),
tn.DenseNode("fc2"),
tn.SigmoidNode("sigmoid2"),
sp.AuxiliaryKLSparsityPenaltyNode("sp2", cost_weight=1e1),
tn.DropoutNode("do2"),
tn.DenseNode("fc3", num_units=10),
tn.SoftmaxNode("pred"),
tn.TotalCostNode(
"cost",
{"pred": tn.IdentityNode("pred_id"),
"target": tn.InputNode("y", shape=(None,), dtype="int32")},
cost_function=treeano.utils.categorical_crossentropy_i32),
tn.InputElementwiseSumNode("total_cost")]),
num_units=512,
sparsity=0.1,
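    # sparsity is the target mean activation rho; the auxiliary nodes presumably add the usual
    # KL(rho || rho_hat) penalty on the mean sigmoid activations (see kl_sparsity_penalty)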
cost_reference="total_cost",
dropout_probability=0.5,
inits=[treeano.inits.XavierNormalInit()],
)
with_updates = tn.HyperparameterNode(
"with_updates",
tn.AdamNode(
"adam",
{"subtree": model,
"cost": tn.ReferenceNode("cost_ref", reference="total_cost")}),
)
network = with_updates.network()
network.build() # build eagerly to share weights
BATCH_SIZE = 500
valid_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="valid_time"),
canopy.handlers.override_hyperparameters(dropout_probability=0),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"total_cost": "total_cost", "pred": "pred"})
def validate(in_dict, results_dict):
valid_out = valid_fn(in_valid)
probabilities = valid_out["pred"]
predicted_classes = np.argmax(probabilities, axis=1)
results_dict["valid_cost"] = valid_out["total_cost"]
results_dict["valid_time"] = valid_out["valid_time"]
results_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
y_valid, predicted_classes)
train_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="total_time"),
canopy.handlers.call_after_every(1, validate),
canopy.handlers.time_call(key="train_time"),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"train_cost": "total_cost",
"train_sp_cost1": "sp1_sendto",
"train_sp_cost2": "sp2_sendto",
"train_classification_cost": "cost"},
include_updates=True)
# ################################# training #################################
print("Starting training...")
canopy.evaluate_until(fn=train_fn,
gen=itertools.repeat(in_train),
max_iters=25)
| apache-2.0 |
matbra/bokeh | bokeh/compat/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
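        # SVG-style path codes: 'M' (moveto) for the first vertex, 'L' (lineto) for the remaining ones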
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
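        # cycle the shorter style/offset sequences and zip everything together, truncating to N elements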
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles : dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
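# Editorial sketch (not part of the original class): the data / pathcodes
# pairing described above for a hypothetical closed triangle. 'M' and each
# 'L' consume one vertex, while 'Z' closes the path and consumes none, so
# len(pathcodes) and len(data) differ in general.
_example_triangle_data = [[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]]
_example_triangle_codes = ['M', 'L', 'L', 'Z']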
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
| bsd-3-clause |
zihua/scikit-learn | sklearn/metrics/classification.py | 3 | 71852 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
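# Editorial sketch (illustrative only, not part of the original module): a
# binary y_true compared against a multiclass y_pred is collapsed to the
# common 'multiclass' type, and both inputs come back as 1d arrays.
_example_type, _example_y_true, _example_y_pred = _check_targets(
    [0, 1, 1, 0], [0, 2, 1, 0])
# _example_type == 'multiclass'; both returned arrays have shape (4,)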
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means no weighting;
"linear" means linear weighting; "quadratic" means quadratic weighting.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
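# Editorial sketch (illustrative only, not part of the original module): two
# annotators who agree on 3 of 4 samples, with per-annotator marginals
# p1 = (0.5, 0.5) and p2 = (0.75, 0.25), give p_o = 0.75 and p_e = 0.5, so
# kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.
_example_kappa = cohen_kappa_score([0, 0, 1, 1], [0, 0, 1, 0])  # == 0.5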
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
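# Editorial sketch (illustrative only, not part of the original module): for
# a label with no predicted samples the denominator is 0, so its score is
# forced to 0.0 and an UndefinedMetricWarning is emitted rather than
# returning inf/nan. The errstate guard mirrors how the callers below use it.
with np.errstate(divide='ignore', invalid='ignore'):
    _example_precision = _prf_divide(
        np.array([2.0, 0.0]), np.array([4.0, 0.0]),
        'precision', 'predicted', 'macro', ('precision',))
# _example_precision == array([0.5, 0.0])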
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
classes : array, shape = [n_labels], optional
(deprecated) Integer array of labels. This parameter has been
renamed to ``labels`` in version 0.18 and will be removed in 0.20.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. The Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
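# Editor's note: a minimal illustrative sketch (not part of scikit-learn) that
# reproduces the log_loss docstring example above by hand with numpy. It
# assumes the alphabetical label ordering described above ("ham" -> column 0,
# "spam" -> column 1); the helper name is ours, purely for demonstration.
def _log_loss_by_hand_example():
    probs = np.array([[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    true_col = np.array([1, 0, 0, 1])  # "spam", "ham", "ham", "spam"
    # Mean negative log-probability assigned to the true class of each sample.
    per_sample = -np.log(probs[np.arange(4), true_col])
    return per_sample.mean()  # ~0.21616, matching the log_loss example above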
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1. The cumulated hinge loss is therefore an upper
    bound on the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
    is an upper bound on the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
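# Editor's note: a minimal illustrative sketch (not part of scikit-learn)
# mirroring the binary hinge_loss docstring example above, using the decision
# values printed there (-2.18, 2.36, 0.09); predictions with margin >= 1
# contribute zero loss. The helper name is ours, purely for demonstration.
def _binary_hinge_loss_by_hand_example():
    y = np.array([-1, 1, 1])
    decision = np.array([-2.18, 2.36, 0.09])
    losses = np.maximum(0, 1 - y * decision)  # only the last sample is penalized
    return losses.mean()  # ~0.30, matching hinge_loss([-1, 1, 1], pred_decision)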
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
    mean squared difference between (1) the predicted probability assigned
    to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
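# Editor's note: a minimal illustrative sketch (not part of scikit-learn)
# showing that the Brier score is simply the mean squared difference between
# the predicted probabilities and the 0/1 outcomes, as described above. The
# helper name is ours, purely for demonstration.
def _brier_score_by_hand_example():
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)  # 0.0375, matching brier_score_loss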
| bsd-3-clause |
poryfly/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
liberatorqjw/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
scharch/SONAR | tests/run_tests.py | 1 | 3419 | #!/usr/bin/env python3
#clean up function
def cleanup():
shutil.rmtree( "output", ignore_errors=True )
shutil.rmtree( "work", ignore_errors=True)
try:
os.remove("f0_merged.fq")
os.remove("derepAllRawSeqs.uc")
os.remove("lineage.fa")
except:
pass
#test imports (it's a bit silly to include builtins in this list...)
import importlib, sys
for checkModule in ['airr', 'atexit', 'Bio', 'collections', 'colorsys', 'csv', 'datetime', 'docopt', 'ete3', 'fileinput', 'functools', 'fuzzywuzzy', 'glob', 'gzip', 'io', 'itertools', 'Levenshtein', 'math', 'multiprocessing', 'numpy', 'os', 'pandas', 'pickle', 'PyQt4.QtGui', 'random', 're', 'shutil', 'statistics', 'string', 'subprocess', 'sys', 'time', 'traceback']:
try:
my_module = importlib.import_module(checkModule)
except ImportError:
sys.exit(f"module {checkModule} not found")
#check Biopython submodules (not sure this is necessary)
for submodule in ['Align', 'AlignIO', 'Alphabet', 'Data.CodonTable', 'Phylo', 'Seq', 'SeqIO', 'SeqRecord']:
try:
my_module = importlib.import_module(f"Bio.{submodule}", package="Bio")
except ImportError:
sys.exit(f"module Bio.{submodule} not found")
#check for master script
import shutil
if not shutil.which('sonar'):
print("Master script not found in PATH, programs will be invoked directly", file=sys.stderr)
#test SONAR scripts
import os, subprocess
SONARDIR = os.path.abspath(sys.argv[0]).split("SONAR/tests")[0]
os.chdir(f"{SONARDIR}/SONAR/tests")
for command in [ [f"{SONARDIR}/SONAR/annotate/1.0-preprocess.py", "--input", "subsample_r1.fq.gz", "--reverse", "subsample_r2.fq.gz"],
[f"{SONARDIR}/SONAR/annotate/1.1-blast_V.py", "--fasta", "f0_merged.fq", "--derep", "--npf", "2000", "--threads", "2"],
[f"{SONARDIR}/SONAR/annotate/1.2-blast_J.py"],
[f"{SONARDIR}/SONAR/annotate/1.3-finalize_assignments.py"],
[f"{SONARDIR}/SONAR/annotate/1.4-cluster_sequences.py"],
[f"{SONARDIR}/SONAR/lineage/2.1-calculate_id-div.py"],
[f"{SONARDIR}/SONAR/plotting/4.1-setup_plots.pl", "--statistic", "div"],
[f"{SONARDIR}/SONAR/lineage/2.4-cluster_into_groups.py"],
[f"{SONARDIR}/SONAR/utilities/getReadsByAnnotation.py", "-f", "output/sequences/nucleotide/tests_goodVJ_unique_lineageNotations.fa", "-a", "clone_id=000(01|07|08)", "-o", "lineage.fa"],
[f"{SONARDIR}/SONAR/phylogeny/3.2-run_IgPhyML.py", "-v", "IGHV4-39*01", "--seqs", "lineage.fa", "--quick", "--seed", "321325749"],
[f"{SONARDIR}/SONAR/utilities/flipTree.pl", "output/tests_igphyml.tree", "output/tests_igphyml.flipped.tree"] ]:
s=subprocess.Popen( command, universal_newlines=True, stderr=subprocess.PIPE )
o,e = s.communicate()
if s.returncode != 0:
cleanup()
sys.exit( f"Received error \"{e.strip()}\" running {command[0].split('/')[-1]}" )
#validate rearrangements output
#at the moment, it seems like IgPhyML output varies a bit even with a specified seed, so skip that test for now.
import hashlib
checksums = { "output/tables/tests_rearrangements.tsv":"f9065c38ae0f5713ccc68cade8592183" }#, "output/sequences/nucleotide/tests_inferredAncestors.fa":"46b03fc95afec5fda950f048d99db34b"}
for toValidate in checksums:
h = hashlib.md5()
with open(toValidate, 'rb') as handle:
buf=handle.read()
h.update(buf)
if not h.hexdigest() == checksums[toValidate]:
cleanup()
sys.exit( f"{toValidate} failed validation!")
#All done!
cleanup()
print("All tests passed!")
| gpl-3.0 |
gregcaporaso/qiime | qiime/compare_distance_matrices.py | 15 | 11259 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Michael Dwan", "Logan Knecht",
"Damien Coy", "Levi McCracken", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
from os import path
from skbio.stats import p_value_to_str
from skbio.stats.distance import DistanceMatrix, mantel
from qiime.util import make_compatible_distance_matrices
from qiime.stats import MantelCorrelogram, PartialMantel
def run_mantel_test(method, fps, distmats, num_perms, tail_type, comment,
control_dm_fp=None, control_dm=None,
sample_id_map=None):
"""Runs a Mantel test on all pairs of distance matrices.
Returns a string suitable for writing out to a file containing the results
of the test.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
method - which Mantel test to run (either 'mantel' or 'partial_mantel')
fps - list of filepaths of the distance matrices
distmats - list of tuples containing dm labels and dm data (i.e. the
output of parse_distmat)
num_perms - the number of permutations to use to calculate the
p-value(s)
tail_type - the type of tail test to use when calculating the
p-value(s). Can be 'two-sided', 'greater', or 'less'. Only applies
when method is mantel
comment - comment string to add to the beginning of the results string
control_dm_fp - filepath of the control distance matrix. Only applies
when method is partial_mantel (it is required then)
control_dm - tuple containing control distance matrix labels and matrix
data. Only applies when method is partial_mantel (it is required
then)
sample_id_map - dict mapping sample IDs (i.e. what is expected by
make_compatible_distance_matrices)
"""
if len(fps) != len(distmats):
raise ValueError("Must provide the same number of filepaths as there "
"are distance matrices.")
if comment is None:
comment = ''
result = comment
if method == 'mantel':
result += 'DM1\tDM2\tNumber of entries\tMantel r statistic\t' + \
'p-value\tNumber of permutations\tTail type\n'
elif method == 'partial_mantel':
if not control_dm_fp or not control_dm:
raise ValueError("You must provide a control matrix filepath and "
"control matrix when running the partial Mantel "
"test.")
result += 'DM1\tDM2\tCDM\tNumber of entries\t' + \
'Mantel r statistic\tp-value\tNumber of permutations\t' +\
'Tail type\n'
else:
raise ValueError("Invalid method '%s'. Must be either 'mantel' or "
"'partial_mantel'." % method)
# Loop over all pairs of dms.
for i, (fp1, (dm1_labels, dm1_data)) in enumerate(zip(fps, distmats)):
for fp2, (dm2_labels, dm2_data) in zip(fps, distmats)[i + 1:]:
# Make the current pair of distance matrices compatible by only
# keeping samples that match between them, and ordering them by
# the same sample IDs.
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices((dm1_labels, dm1_data),
(dm2_labels, dm2_data), lookup=sample_id_map)
if method == 'partial_mantel':
# We need to intersect three sets (three matrices).
(dm1_labels, dm1_data), (cdm_labels, cdm_data) = \
make_compatible_distance_matrices(
(dm1_labels, dm1_data), control_dm,
lookup=sample_id_map)
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices(
(dm1_labels, dm1_data), (dm2_labels, dm2_data),
lookup=sample_id_map)
if len(dm1_labels) < 3:
result += '%s\t%s\t%s\t%d\tToo few samples\n' % (fp1,
fp2, control_dm_fp, len(dm1_labels))
continue
elif len(dm1_labels) < 3:
result += '%s\t%s\t%d\tToo few samples\n' % (fp1, fp2,
len(dm1_labels))
continue
dm1 = DistanceMatrix(dm1_data, dm1_labels)
dm2 = DistanceMatrix(dm2_data, dm2_labels)
if method == 'mantel':
corr_coeff, p_value, n = mantel(dm1, dm2, method='pearson',
permutations=num_perms, alternative=tail_type,
strict=True)
p_str = p_value_to_str(p_value, num_perms)
result += "%s\t%s\t%d\t%.5f\t%s\t%d\t%s\n" % (
fp1, fp2, n, corr_coeff, p_str, num_perms, tail_type)
elif method == 'partial_mantel':
cdm = DistanceMatrix(cdm_data, cdm_labels)
results = PartialMantel(dm1, dm2, cdm)(num_perms)
p_str = p_value_to_str(results['mantel_p'], num_perms)
result += "%s\t%s\t%s\t%d\t%.5f\t%s\t%d\t%s\n" % (
fp1, fp2, control_dm_fp, len(dm1_labels),
results['mantel_r'], p_str, num_perms, 'greater')
return result
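# Editor's note: a minimal illustrative sketch (not part of QIIME) of the
# underlying skbio ``mantel`` call used above, applied to two tiny symmetric,
# hollow distance matrices. The IDs, values and helper name are made up
# purely for demonstration.
def _mantel_example():
    dm_a = DistanceMatrix([[0, 1, 2], [1, 0, 3], [2, 3, 0]], ['s1', 's2', 's3'])
    dm_b = DistanceMatrix([[0, 2, 4], [2, 0, 6], [4, 6, 0]], ['s1', 's2', 's3'])
    # Returns (correlation coefficient, p-value, number of matching samples).
    return mantel(dm_a, dm_b, method='pearson', permutations=99,
                  alternative='two-sided', strict=True)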
def run_mantel_correlogram(fps, distmats, num_perms, comment, alpha,
sample_id_map=None,
variable_size_distance_classes=False):
"""Runs a Mantel correlogram analysis on all pairs of distance matrices.
Returns a string suitable for writing out to a file containing the results
of the test, a list of correlogram filepath names, and a list of matplotlib
Figure objects representing each correlogram.
The correlogram filepaths can have an extension string appended to the end
of them and then be used to save each of the correlogram Figures to a file.
Each correlogram filepath will be a combination of the two distance matrix
filepaths that were used to create it.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
fps - list of filepaths of the distance matrices
distmats - list of tuples containing dm labels and dm data (i.e. the
output of parse_distmat)
num_perms - the number of permutations to use to calculate the
p-value(s)
comment - comment string to add to the beginning of the results string
alpha - the alpha value to use to determine significance in the
correlogram plots
sample_id_map - dict mapping sample IDs (i.e. what is expected by
make_compatible_distance_matrices)
variable_size_distance_classes - create distance classes that vary in
size (i.e. width) but have the same number of distances in each
class
"""
if len(fps) != len(distmats):
raise ValueError("Must provide the same number of filepaths as there "
"are distance matrices.")
if comment is None:
comment = ''
result = comment + 'DM1\tDM2\tNumber of entries\t' + \
'Number of permutations\tClass index\t' + \
'Number of distances\tMantel r statistic\t' + \
'p-value\tp-value (Bonferroni corrected)\tTail type\n'
correlogram_fps = []
correlograms = []
# Loop over all pairs of dms.
for i, (fp1, (dm1_labels, dm1_data)) in enumerate(zip(fps, distmats)):
for fp2, (dm2_labels, dm2_data) in zip(fps, distmats)[i + 1:]:
# Make the current pair of distance matrices compatible by only
# keeping samples that match between them, and ordering them by
# the same sample IDs.
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices((dm1_labels, dm1_data),
(dm2_labels, dm2_data), lookup=sample_id_map)
if len(dm1_labels) < 3:
result += '%s\t%s\t%d\tToo few samples\n' % (fp1, fp2,
len(dm1_labels))
continue
dm1 = DistanceMatrix(dm1_data, dm1_labels)
dm2 = DistanceMatrix(dm2_data, dm2_labels)
# Create an instance of our Mantel correlogram test and run it with
# the specified number of permutations.
mc = MantelCorrelogram(dm1, dm2, alpha=alpha,
variable_size_distance_classes=variable_size_distance_classes)
results = mc(num_perms)
# Generate a name for the current correlogram and save it and the
# correlogram itself.
dm1_name = path.basename(fp1)
dm2_name = path.basename(fp2)
correlogram_fps.append('_'.join((dm1_name, 'AND', dm2_name,
'mantel_correlogram')) + '.')
correlograms.append(results['correlogram_plot'])
# Iterate over the results and write them to the text file.
first_time = True
for class_idx, num_dist, r, p, p_corr in zip(
results['class_index'], results['num_dist'],
results['mantel_r'], results['mantel_p'],
results['mantel_p_corr']):
# Format p-values and figure out which tail type we have based
# on the sign of r.
p_str = None
if p is not None:
p_str = p_value_to_str(p, num_perms)
p_corr_str = None
if p_corr is not None:
p_corr_str = p_value_to_str(p_corr, num_perms)
if r is None:
tail_type = None
elif r < 0:
tail_type = 'less'
else:
tail_type = 'greater'
if first_time:
result += '%s\t%s\t%d\t%d\t%s\t%d\t%s\t%s\t%s\t%s\n' % (
fp1, fp2, len(dm1_labels), num_perms, class_idx,
num_dist, r, p_str, p_corr_str, tail_type)
first_time = False
else:
result += '\t\t\t\t%s\t%d\t%s\t%s\t%s\t%s\n' % (class_idx,
num_dist, r, p_str, p_corr_str, tail_type)
return result, correlogram_fps, correlograms
| gpl-2.0 |
wk8910/bio_tools | 01.dadi_fsc/02.two_pop_model/05.Mpre/01.model.py | 1 | 2604 | #! /usr/bin/env python
import os,sys,re
# import matplotlib
# matplotlib.use('Agg')
import numpy
import sys
from numpy import array
# import pylab
import dadi
spectrum_file = sys.argv[1]
data = dadi.Spectrum.from_file(spectrum_file)
data = data.fold()
ns = data.sample_sizes
pts_l = [40,50,60]
# params = (nuPre,TPre,nu1,nu2,T,m12,m21)
from dadi import Numerics, PhiManip, Integration
from dadi.Spectrum_mod import Spectrum
def simple_mig(params, ns, pts):
"""
params = (nuPre,TPre,nu1,nu2,T,m12,m21)
ns = (n1,n2)
    Simple migration model; population sizes are constant after the split.
    nuPre: Size after first size change
    TPre: Time before split of first size change.
    nu1: size of pop 1.
    nu2: size of pop 2.
    T: Time since the split (in units of 2*Na generations)
    m12, m21: Migration rates between the two populations
n1,n2: Sample sizes of resulting Spectrum
pts: Number of grid points to use in integration.
"""
nuPre,TPre,nu1,nu2,T,m12,m21 = params
xx = Numerics.default_grid(pts)
phi = PhiManip.phi_1D(xx)
phi = Integration.one_pop(phi, xx, TPre, nu=nuPre)
phi = PhiManip.phi_1D_to_2D(xx, phi)
phi = Integration.two_pops(phi, xx, T, nu1, nu2, m12=m12, m21=m21)
fs = Spectrum.from_phi(phi, ns, (xx,xx))
return fs
func = simple_mig
# nuPre,TPre,nu1,nu2,T,M.m12,M.m21
upper_bound = [10, 1, 10, 10, 1, 10, 10]
lower_bound = [1e-2, 1e-2, 1e-2, 0, 0, 0, 0]
p0 = [3.58393534462, 0.486773940004, 0.823557413109, 0.322669721201, 0.20555616477, 1, 1]
func_ex = dadi.Numerics.make_extrap_log_func(func)
p0 = dadi.Misc.perturb_params(p0, fold=1, upper_bound=upper_bound, lower_bound=lower_bound)
print('Beginning optimization ************************************************')
popt = dadi.Inference.optimize_log(p0, data, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(p0), maxiter=30)
print('Finished optimization **************************************************')
print('Best-fit parameters: {0}'.format(popt))
model = func_ex(popt, ns, pts_l)
ll_model = dadi.Inference.ll_multinom(model, data)
print('Maximum log composite likelihood: {0}\n'.format(ll_model))
theta = dadi.Inference.optimal_sfs_scaling(model, data)
print('Optimal value of theta: {0}\n'.format(theta))
result=[ll_model,theta]+popt.tolist()
print('###DADIOUTPUT###')
# nuPre,TPre,nu1,nu2,T,M.m12,M.m21
print('likelihood\ttheta\tN.nuPre\tT.Tpre\tN.nu1\tN.nu2\tT.T\tM.m12\tM.m21')
print("\t".join(map(str,result)))
| mpl-2.0 |
ArmstrongYang/StudyShare | Spark-Python/Spark_ML.py | 1 | 1339 | from pyspark import SparkContext, SparkConf
'''
Spark machine learning notes (2): data processing and feature extraction with Spark Python
http://blog.csdn.net/u013719780/article/details/51768720
'''
appName = 'spark-ml'
master = 'local'
scconf = SparkConf().setAppName(appName).setMaster(master)
sc = SparkContext(conf=scconf)
def plot():
import matplotlib.pyplot as plt
from matplotlib.pyplot import hist
ages = user_fields.map(lambda x: int(x[1])).collect()
hist(ages, bins=20, color='lightblue',normed=True)
fig = plt.gcf()
fig.set_size_inches(12,6)
plt.show()
if __name__ == '__main__':
print(__file__)
file_name = "ml-100k/u.user"
user_data = sc.textFile(file_name)
print(user_data.first())
user_fields = user_data.map(lambda line: line.split('|'))
    num_users = user_fields.map(lambda fields: fields[0]).count() # count the number of users
    num_genders = user_fields.map(lambda fields: fields[2]).distinct().count() # count distinct genders
    num_occupations = user_fields.map(lambda fields: fields[3]).distinct().count() # count distinct occupations
    num_zipcodes = user_fields.map(lambda fields: fields[4]).distinct().count() # count distinct ZIP codes
print ("Users: %d, genders: %d, occupations: %d, ZIP codes: %d"%(num_users,num_genders,num_occupations,num_zipcodes))
plot()
exit(0) | apache-2.0 |
cbertinato/pandas | pandas/tseries/holiday.py | 1 | 16245 | from datetime import datetime, timedelta
from typing import List
import warnings
from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa
import numpy as np
from pandas.errors import PerformanceWarning
from pandas import DateOffset, Series, Timestamp, date_range
from pandas.tseries.offsets import Day, Easter
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
    Needed for holidays such as Christmas observance in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
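# Editor's note: a minimal illustrative sketch (not part of pandas) of how the
# observance helpers above shift a holiday that falls on a weekend;
# 2015-07-04 was a Saturday. The helper name is ours, purely for demonstration.
def _observance_example():
    saturday = datetime(2015, 7, 4)
    observed_before = nearest_workday(saturday)  # 2015-07-03, the Friday before
    observed_after = next_monday(saturday)       # 2015-07-06, the following Monday
    return observed_before, observed_after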
class Holiday:
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
            Name of the holiday; defaults to the class name
        offset : array of pandas.tseries.offsets or
            class from pandas.tseries.offsets
            computes offset from date
        observance : function
            computes when holiday is given a pandas Timestamp
        days_of_week :
            provide a tuple of days, e.g. (0, 1, 2, 3) for Monday through Thursday
            Monday=0, ..., Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('Memorial Day', month=5, day=31,
offset=pd.DateOffset(weekday=MO(-1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=pd.DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday),
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year={year}, '.format(year=self.year)
info += 'month={mon}, day={day}, '.format(mon=self.month, day=self.day)
if self.offset is not None:
info += 'offset={offset}'.format(offset=self.offset)
if self.observance is not None:
info += 'observance={obs}'.format(obs=self.observance)
repr = 'Holiday: {name} ({info})'.format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = date_range(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super().__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):
"""
Abstract interface to create holidays following certain rules.
"""
rules = [] # type: List[Holiday]
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super().__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Return the holidays observed between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar {name} does not have any '
'rules specified'.format(name=self.name))
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
        will take precedence over other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('Memorial Day', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Martin Luther King Jr. Day',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('Presidents Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
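# Editor's note: a minimal illustrative usage sketch (not part of pandas) of
# the calendar defined above; ``holidays`` accepts datetime-like start/end
# values and returns the observed dates (e.g. 2015-07-03 for July 4th, 2015,
# which fell on a Saturday). The helper name is ours, purely for demonstration.
def _us_federal_holidays_example():
    cal = USFederalHolidayCalendar()
    return cal.holidays(start='2015-01-01', end='2015-12-31')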
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
| bsd-3-clause |
rabipanda/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
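# Editor's note: a minimal illustrative sketch (not part of TensorFlow) of how
# ``select`` above is used; the axis names, labels and helper name here are
# made up purely for demonstration.
def _select_example():
  example = core.LabeledTensor(
      array_ops.zeros((2, 3)),
      [core.Axis('row', ['a', 'b']), core.Axis('col', [10, 20, 30])])
  single_row = select(example, {'row': 'a'})     # scalar label selection
  two_cols = select(example, {'col': [10, 30]})  # list selection (uses gather)
  return single_row, two_cols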
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one input tensor is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
      don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some frequency
      # because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
raise ValueError('existing_name %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
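# Editorial usage sketch, not part of the original module: it only builds the
# graph for batching a single unbatched example into batches of 8; running it
# needs the usual TF 1.x queue-runner machinery. Names are illustrative only.
def _example_batch():
  example = core.LabeledTensor(array_ops.ones([3]), ['feature'])
  batched, = batch([example], batch_size=8)
  return batched  # axes: batch (8), feature (3)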
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
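# Editorial usage sketch, not part of the original module; assumes the module's
# imports and TF 1.x graph mode.
def _example_random_crop():
  image = core.LabeledTensor(
      array_ops.ones([8, 8, 3]), ['row', 'col', 'chan'])
  # Crop rows and columns to 4; the 'chan' axis is left untouched.
  return random_crop(image, {'row': 4, 'col': 4})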
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
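# Editorial usage sketch, not part of the original module: map a per-element
# function over the first ('batch') axis. The function must map LabeledTensor
# to LabeledTensor, as required by `map_fn` above.
def _example_map_fn():
  batched = core.LabeledTensor(array_ops.ones([4, 3]), ['batch', 'x'])
  square = lambda t: core.LabeledTensor(
      math_ops.square(t.tensor), list(t.axes.values()))
  return map_fn(square, batched)  # axes: batch (4), x (3)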
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
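# Editorial usage sketch, not part of the original module: sum the rows of a
# (batch, x) tensor by left-folding addition over the 'batch' axis.
def _example_foldl():
  batched = core.LabeledTensor(array_ops.ones([4, 3]), ['batch', 'x'])
  initial = core.LabeledTensor(array_ops.zeros([3]), ['x'])
  add = lambda acc, nxt: core.LabeledTensor(
      math_ops.add(acc.tensor, nxt.tensor), list(acc.axes.values()))
  return foldl(add, batched, initial)  # axes: x (3)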
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
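# Editorial usage sketch, not part of the original module.
def _example_squeeze():
  lt_in = core.LabeledTensor(array_ops.ones([2, 1, 3]), ['x', 'dummy', 'y'])
  return squeeze(lt_in, ['dummy'])  # axes: x (2), y (3)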
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number of shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between the '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
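# Editorial usage sketch, not part of the original module: a matrix-matrix
# product summed over the single shared axis 'y'.
def _example_matmul():
  a = core.LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  b = core.LabeledTensor(array_ops.ones([3, 4]), ['y', 'z'])
  return matmul(a, b)  # axes: x (2), z (4)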
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
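# Editorial usage sketch, not part of the original module: the generated
# reduction ops accept either no axes (reduce everything) or a list of axis
# names.
def _example_reduce_sum():
  lt_in = core.LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  total = reduce_sum(lt_in)         # scalar LabeledTensor, all axes reduced
  per_x = reduce_sum(lt_in, ['y'])  # axes: x (2)
  return total, per_x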
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a
      multiple other than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
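# Editorial usage sketch, not part of the original module: only unlabeled axes
# can be tiled, so string-named axes (which carry no tick labels) are fine.
def _example_tile():
  lt_in = core.LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  return tile(lt_in, {'y': 2})  # axes: x (2), y (6)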
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
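# Editorial usage sketch, not part of the original module: pad one element
# before and two after along 'y', leaving 'x' untouched.
def _example_pad():
  lt_in = core.LabeledTensor(array_ops.ones([2, 3]), ['x', 'y'])
  return pad(lt_in, {'y': (1, 2)})  # axes: x (2), y (6)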
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant LabeledTensor.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
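# Editorial usage sketch, not part of the original module: with fully specified
# axes, the shape is passed through to the underlying constant op.
def _example_constant():
  return constant(
      [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], axes=[('x', 2), ('y', 3)])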
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because the True indices in `mask` are not known until
  runtime.
Args:
labeled_tensor: The input tensor.
    mask: A 1-D boolean LabeledTensor whose single axis must equal the first
      axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the single axis of `mask` does not equal the first axis of
      `labeled_tensor`.
    NotImplementedError: if `mask` has more than one axis.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
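# Editorial usage sketch, not part of the original module: the mask's single
# axis must equal the first axis of the input, so both are built with explicit
# sizes via the `constant` op defined in this module.
def _example_boolean_mask():
  lt_in = constant(
      [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], axes=[('x', 3), ('y', 2)])
  mask = constant([True, False, True], axes=[('x', 3)])
  return boolean_mask(lt_in, mask)  # axes: x (unknown size), y (2)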
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x`, and `y` do not all have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
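# Editorial usage sketch, not part of the original module: all three inputs
# must share identical axes.
def _example_where():
  cond = constant([True, False, True], axes=[('x', 3)])
  x = constant([1.0, 2.0, 3.0], axes=[('x', 3)])
  y = constant([-1.0, -2.0, -3.0], axes=[('x', 3)])
  return where(cond, x, y)  # elements from x where cond is True, else from y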
| apache-2.0 |
tzk/EDeN | eden/__init__.py | 2 | 2422 | #!/usr/bin/env python
"""Provides interface for vectorizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dill
from sklearn.base import BaseEstimator, TransformerMixin
__author__ = "Fabrizio Costa"
__copyright__ = "Copyright 2015, Fabrizio Costa"
__credits__ = ["Fabrizio Costa", "Bjoern Gruening"]
__license__ = "MIT"
__version__ = "v2.0.1202"
__maintainer__ = "Fabrizio Costa"
__email__ = "[email protected]"
__status__ = "Production"
__magic__ = ('EDeN', 42)
__magic_py2hash__ = -7048895691955021301
__magic_py3hash__ = -1821860980875793120
_bitmask_ = 4294967295
class AbstractVectorizer(BaseEstimator, TransformerMixin):
"""Interface declaration for the Vectorizer class."""
def annotate(self, graphs, estimator=None, reweight=1.0, relabel=False):
raise NotImplementedError("Should have implemented this")
def set_params(self, **args):
raise NotImplementedError("Should have implemented this")
def transform(self, graphs):
raise NotImplementedError("Should have implemented this")
def vertex_transform(self, graph):
raise NotImplementedError("Should have implemented this")
def run_dill_encoded(what):
"""Use dill as replacement for pickle to enable multiprocessing on instance methods"""
fun, args = dill.loads(what)
return fun(*args)
def apply_async(pool, fun, args, callback=None):
"""
Wrapper around apply_async() from multiprocessing, to use dill instead of pickle.
This is a workaround to enable multiprocessing of classes.
"""
return pool.apply_async(run_dill_encoded, (dill.dumps((fun, args)),), callback=callback)
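# Editorial usage sketch, not part of the original module: dispatching a bound
# method through a multiprocessing.Pool with the dill-based wrapper above.
# `pool`, `vectorizer` and `graphs` are illustrative placeholders.
def _example_apply_async(pool, vectorizer, graphs):
    async_result = apply_async(pool, vectorizer.transform, args=(graphs,))
    return async_result.get()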
def fast_hash_2(dat_1, dat_2, bitmask=_bitmask_):
return int(hash((dat_1, dat_2)) & bitmask) + 1
def fast_hash_3(dat_1, dat_2, dat_3, bitmask=_bitmask_):
return int(hash((dat_1, dat_2, dat_3)) & bitmask) + 1
def fast_hash_4(dat_1, dat_2, dat_3, dat_4, bitmask=_bitmask_):
return int(hash((dat_1, dat_2, dat_3, dat_4)) & bitmask) + 1
def fast_hash(vec, bitmask=_bitmask_):
return int(hash(tuple(vec)) & bitmask) + 1
def fast_hash_vec(vec, bitmask=_bitmask_):
    """Return one bounded hash code per prefix of `vec`, built incrementally."""
    hash_vec = []
    running_hash = 0xAAAAAAAA
    for i, vec_item in enumerate(vec):
        # Fold the item and its position into the running hash, then bound it.
        running_hash ^= hash((running_hash, vec_item, i))
        hash_vec.append(int(running_hash & bitmask) + 1)
    return hash_vec
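# Editorial usage sketch, not part of the original module: how the hashing
# helpers bound arbitrary Python hashes to positive feature codes.
def _example_fast_hash_usage():
    pair_code = fast_hash_2(13, 21)             # one bounded code for a pair
    vec_code = fast_hash([13, 21, 34])          # one code for the whole vector
    prefix_codes = fast_hash_vec([13, 21, 34])  # one code per prefix
    return pair_code, vec_code, prefix_codes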
| gpl-3.0 |