repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
lpenguin/pandas-qt | tests/test_CSVDialogs.py | 4 | 11376 |
# -*- coding: utf-8 -*-
import os
import tempfile
from pandasqt.compat import Qt, QtCore, QtGui
import numpy
import pytest
import pytestqt
from pandasqt.views.CSVDialogs import (
DelimiterValidator, DelimiterSelectionWidget,
CSVImportDialog, CSVExportDialog
)
from pandasqt.models.DataFrameModel import DataFrameModel
FIXTUREDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')
@pytest.fixture()
def csv_file():
return os.path.join(FIXTUREDIR, 'csv_file.csv')
@pytest.fixture()
def tmp(request):
handle, name = tempfile.mkstemp(suffix='.csv')
def _teardown():
os.close(handle)
os.remove(name)
request.addfinalizer(_teardown)
return name
class TestValidator(object):
def test_input(self, qtbot):
widget = QtGui.QLineEdit()
widget.setValidator(DelimiterValidator())
qtbot.addWidget(widget)
widget.show()
qtbot.keyPress(widget, ' ')
assert widget.text() == ''
qtbot.keyPress(widget, 'a')
assert widget.text() == 'a'
class TestDelimiterBox(object):
def test_selections_and_signals(self, qtbot):
box = DelimiterSelectionWidget()
qtbot.addWidget(box)
box.show()
buttons = box.findChildren(QtGui.QRadioButton)
lineedit = box.findChildren(QtGui.QLineEdit)[0]
delimiters = []
for button in buttons:
with qtbot.waitSignal(box.delimiter, 1000):
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if lineedit.isEnabled():
qtbot.keyPress(lineedit, 'a')
delimiters.append(box.currentSelected())
assert len(delimiters) == 4
for char in [',', ';', '\t', 'a']:
assert char in delimiters
def test_reset(self, qtbot):
box = DelimiterSelectionWidget()
qtbot.addWidget(box)
box.show()
buttons = box.findChildren(QtGui.QRadioButton)
lineedit = box.findChildren(QtGui.QLineEdit)[0]
delimiters = []
for button in buttons:
with qtbot.waitSignal(box.delimiter, 1000):
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if lineedit.isEnabled():
qtbot.keyPress(lineedit, 'a')
delimiters.append(box.currentSelected())
box.reset()
assert not lineedit.isEnabled()
class TestCSVImportWidget(object):
def test_init(self, qtbot):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
assert csvwidget.isModal()
assert csvwidget.windowTitle() == u'Import CSV'
def test_fileinput(self, qtbot, csv_file):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
labels = csvwidget.findChildren(QtGui.QLabel)
assert labels[0].text() == u'Choose File'
lineedits = csvwidget.findChildren(QtGui.QLineEdit)
qtbot.keyClicks(lineedits[0], csv_file)
assert csvwidget._previewTableView.model() is not None
assert csvwidget._delimiter == u';'
assert csvwidget._header is None
def test_header(self, qtbot):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
assert csvwidget._header is None
checkboxes = csvwidget.findChildren(QtGui.QCheckBox)
checkboxes[0].toggle()
assert csvwidget._header == 0
def test_encoding(self, qtbot):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
comboboxes = csvwidget.findChildren(QtGui.QComboBox)
comboboxes[0]
assert comboboxes[0].itemText(comboboxes[0].currentIndex()) == 'ASCII'
qtbot.mouseClick(comboboxes[0], QtCore.Qt.LeftButton)
qtbot.keyPress(comboboxes[0], QtCore.Qt.Key_Down)
assert csvwidget._encodingKey != 'iso_ir_6'
def test_delimiter(self, qtbot):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
groupboxes = csvwidget.findChildren(QtGui.QGroupBox)
radiobuttons = groupboxes[0].findChildren(QtGui.QRadioButton)
lineedits = groupboxes[0].findChildren(QtGui.QLineEdit)
for button in radiobuttons:
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if lineedits[0].isEnabled():
qtbot.keyPress(lineedits[0], ' ')
assert lineedits[0].text() == ''
qtbot.keyPress(lineedits[0], 'a')
assert lineedits[0].text() == 'a'
assert csvwidget._delimiter == groupboxes[0].currentSelected()
def test_accept_reject(self, qtbot):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
buttons = csvwidget.findChildren(QtGui.QPushButton)
for button in buttons:
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
assert csvwidget.isVisible() == False
csvwidget.show()
def test_preview(self, qtbot, csv_file):
csvwidget = CSVImportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
labels = csvwidget.findChildren(QtGui.QLabel)
lineedits = csvwidget.findChildren(QtGui.QLineEdit)
qtbot.keyClicks(lineedits[0], csv_file)
groupboxes = csvwidget.findChildren(QtGui.QGroupBox)
radiobuttons = groupboxes[0].findChildren(QtGui.QRadioButton)
lineedits = groupboxes[0].findChildren(QtGui.QLineEdit)
for button in radiobuttons:
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if lineedits[0].isEnabled():
qtbot.keyPress(lineedits[0], ';')
assert csvwidget._previewTableView.model() is not None
def _assert(x, path):
assert x
assert isinstance(x, DataFrameModel)
assert path
assert isinstance(path, basestring)
csvwidget.load.connect(_assert)
with qtbot.waitSignal(csvwidget.load):
csvwidget.accepted()
class TestCSVExportWidget(object):
def test_init(self, qtbot):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
assert csvwidget.isModal()
assert csvwidget.windowTitle() == u'Export to CSV'
def test_fileoutput(self, qtbot, csv_file):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
labels = csvwidget.findChildren(QtGui.QLabel)
assert labels[0].text() == u'Output File'
lineedits = csvwidget.findChildren(QtGui.QLineEdit)
qtbot.keyClicks(lineedits[0], csv_file)
assert csvwidget._filenameLineEdit.text() == csv_file
def test_header(self, qtbot):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
checkboxes = csvwidget.findChildren(QtGui.QCheckBox)
checkboxes[0].toggle()
assert csvwidget._headerCheckBox.isChecked()
def test_encoding(self, qtbot):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
comboboxes = csvwidget.findChildren(QtGui.QComboBox)
comboboxes[0]
assert comboboxes[0].itemText(comboboxes[0].currentIndex()) == 'UTF_8'
def test_delimiter(self, qtbot):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
groupboxes = csvwidget.findChildren(QtGui.QGroupBox)
radiobuttons = groupboxes[0].findChildren(QtGui.QRadioButton)
lineedits = groupboxes[0].findChildren(QtGui.QLineEdit)
delimiter = None
for button in radiobuttons:
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if lineedits[0].isEnabled():
qtbot.keyPress(lineedits[0], ' ')
assert lineedits[0].text() == ''
qtbot.keyPress(lineedits[0], 'a')
assert lineedits[0].text() == 'a'
assert delimiter != groupboxes[0].currentSelected()
delimiter = groupboxes[0].currentSelected()
def test_accept_reject(self, qtbot):
csvwidget = CSVExportDialog()
qtbot.addWidget(csvwidget)
csvwidget.show()
buttons = csvwidget.findChildren(QtGui.QPushButton)
for button in buttons:
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
if button.text() == 'Export Data':
assert csvwidget.isVisible() == True
else:
assert csvwidget.isVisible() == False
class TestDateTimeConversion(object):
def test_read_write(self, qtbot, csv_file, tmp):
importWidget = CSVImportDialog()
qtbot.addWidget(importWidget)
importWidget.show()
import_lineedits = importWidget.findChildren(QtGui.QLineEdit)
qtbot.keyClicks(import_lineedits[0], csv_file)
groupboxes = importWidget.findChildren(QtGui.QGroupBox)
radiobuttons = groupboxes[0].findChildren(QtGui.QRadioButton)
for button in radiobuttons:
if button.text() == 'Semicolon':
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
break
checkboxes = importWidget.findChildren(QtGui.QCheckBox)
checkboxes[0].toggle()
model_in = importWidget._previewTableView.model()
# convert critical datetime column:
column_model = model_in.columnDtypeModel()
index = column_model.index(4, 1)
column_model.setData(index, 'date and time')
##
# now we export the data and load it again
##
exportWidget = CSVExportDialog(model_in)
qtbot.addWidget(exportWidget)
exportWidget.show()
lineedits = exportWidget.findChildren(QtGui.QLineEdit)
qtbot.keyClicks(lineedits[0], tmp)
groupboxes = exportWidget.findChildren(QtGui.QGroupBox)
radiobuttons = groupboxes[0].findChildren(QtGui.QRadioButton)
for button in radiobuttons:
if button.text() == 'Semicolon':
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
break
checkboxes = exportWidget.findChildren(QtGui.QCheckBox)
checkboxes[0].toggle()
buttons = exportWidget.findChildren(QtGui.QPushButton)
with qtbot.waitSignal(exportWidget.exported, timeout=3000):
for button in buttons:
if button.text() == 'Export Data':
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
break
import_lineedits[0].clear()
qtbot.keyClicks(import_lineedits[0], tmp)
buttons = importWidget.findChildren(QtGui.QPushButton)
with qtbot.waitSignal(importWidget.load, timeout=3000):
for button in buttons:
if button.text() == 'Load Data':
model_out_in = importWidget._previewTableView.model()
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
break
column_model = model_out_in.columnDtypeModel()
index = column_model.index(4, 1)
column_model.setData(index, 'date and time')
comparator = model_in.dataFrame() == model_out_in.dataFrame()
assert all(comparator)
df = model_out_in.dataFrame()
| mit |
glennq/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 |
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
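# A minimal usage sketch: the docstring above frames these estimators as
# dimensionality reduction techniques, and PCA is the most common entry point.
# Guarded so it only runs when executed directly, never on import; assumes
# NumPy is available (it is a scikit-learn dependency).
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)                # 100 samples with 10 features
    pca = PCA(n_components=2)             # keep the 2 leading components
    X_reduced = pca.fit_transform(X)      # project onto those components
    print(X_reduced.shape)                # -> (100, 2)
    print(pca.explained_variance_ratio_)  # variance captured per component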
| bsd-3-clause |
kylerbrown/mpvid | setup.py | 1 | 1712 |
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import io
import sys
import mpvid
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.md')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='mpvid',
version=mpvid.__version__,
url='http://github.com/kylerjbrown/mpvid/',
license='BSD',
author='Kyler Brown',
tests_require=['pytest'],
cmdclass={'test': PyTest},
install_requires=['matplotlib'],
author_email='kylerjbrown {at} gmail.com',
description='A quick and dirty matplotlib video library. Uses avconv',
long_description=long_description,
packages=['mpvid'],
include_package_data=True,
platforms='any',
test_suite='mpvid.test.test_mpvid',
classifiers = [
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
],
extras_require={
'testing': ['pytest'],
}
)
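# Note on the test hook above: cmdclass={'test': PyTest} routes the standard
# `python setup.py test` command through PyTest.run_tests(), which hands the
# collected arguments to pytest.main() and exits with its return code, so the
# pytest-based suite (pulled in via tests_require) replaces the default
# unittest runner.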
| bsd-2-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tests/test_format.py | 9 | 154444 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
import re
from pandas.compat import range, zip, lrange, StringIO, PY3, lzip, u
import pandas.compat as compat
import itertools
import os
import sys
from textwrap import dedent
import warnings
from numpy import nan
from numpy.random import randn
import numpy as np
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except ImportError:
pass
from pandas import DataFrame, Series, Index, Timestamp, MultiIndex, date_range, NaT
import pandas.core.format as fmt
import pandas.util.testing as tm
import pandas.core.common as com
from pandas.util.terminal import get_terminal_size
import pandas as pd
from pandas.core.config import (set_option, get_option,
option_context, reset_option)
from datetime import datetime
import nose
_frame = DataFrame(tm.getSeriesData())
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split('\n')[0].startswith("<class")
c2 = r.split('\n')[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
nv = len(r.split('\n')) == 6 # 1. <class>, 2. Index, 3. Columns, 4. dtype, 5. memory usage, 6. trailing newline
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line=='...')[0][0]
except:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix,l in enumerate(r.splitlines()):
if not r.split()[cand_col] == '...':
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match('^[\.\ ]+$',row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split('\n'):
if line.endswith('\\'):
return True
return False
class TestDataFrameFormatting(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
self.frame = _frame.copy()
def tearDown(self):
warnings.filters = self.warn_filters
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self):
self.frame.ix[5] = 0
fmt.set_eng_float_format()
repr(self.frame)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(self.frame)
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
self.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1,columns=range(10),index=range(10))
df.iloc[1,1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
self.assertTrue(('non-null' in buf.getvalue()) is result)
with option_context('display.max_info_rows',20,'display.max_info_columns',20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows',5,'display.max_info_columns',5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': lzip(range(10), range(10))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(max_len - 1,
max_len + 1)) for i in range(10)]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in lzip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
self.assertIn('...', line)
else:
self.assertNotIn('...', line)
with option_context("display.max_colwidth", 999999):
self.assertNotIn('...', repr(df))
with option_context("display.max_colwidth", max_len + 2):
self.assertNotIn('...', repr(df))
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5],[0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1')
with option_context("display.chop_threshold", 0.2 ):
self.assertEqual(repr(df), ' 0 1\n0 0.0 0.5\n1 0.5 0.0')
with option_context("display.chop_threshold", 0.6 ):
self.assertEqual(repr(df), ' 0 1\n0 0 0\n1 0 0')
with option_context("display.chop_threshold", None ):
self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items",2000):
self.assertTrue(len(com.pprint_thing(lrange(1000))) > 1000)
with option_context("display.max_seq_items",5):
self.assertTrue(len(com.pprint_thing(lrange(1000))) < 100)
def test_repr_set(self):
self.assertEqual(com.pprint_thing(set([1])), '{1}')
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than stylized
idx = Index(['a','b'])
res = eval("pd."+repr(idx))
tm.assert_series_equal(Series(res),Series(idx))
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"),
u("\u03c6")]
cols = [u("\u03c8")]
df = DataFrame(data, columns=cols, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
self.assertTrue('\\' not in repr(df))
def test_expand_frame_repr(self):
df_small = DataFrame('hello', [0], [0])
df_wide = DataFrame('hello', [0], lrange(10))
df_tall = DataFrame('hello', lrange(30), lrange(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10,
'display.width',20,
'display.max_rows', 20,
'display.show_dimensions', True):
with option_context('display.expand_frame_repr', True):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_truncated_repr(df_wide))
self.assertTrue(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertTrue(has_expanded_repr(df_tall))
with option_context('display.expand_frame_repr', False):
self.assertFalse(has_truncated_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
self.assertFalse(has_horizontally_truncated_repr(df_wide))
self.assertFalse(has_expanded_repr(df_wide))
self.assertTrue(has_vertically_truncated_repr(df_tall))
self.assertFalse(has_expanded_repr(df_tall))
def test_repr_non_interactive(self):
# in non-interactive mode, there can be no dependency on the
# result of terminal auto-size detection
df = DataFrame('hello', lrange(1000), lrange(5))
with option_context('mode.sim_interactive', False,
'display.width', 0,
'display.height', 0,
'display.max_rows',5000):
self.assertFalse(has_truncated_repr(df))
self.assertFalse(has_expanded_repr(df))
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
raise nose.SkipTest("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
self.assertFalse(has_expanded_repr(mkframe(4)))
self.assertFalse(has_expanded_repr(mkframe(5)))
self.assertFalse(has_expanded_repr(df6))
self.assertTrue(has_doubly_truncated_repr(df6))
with option_context('display.max_rows', 20,
'display.max_columns', 10):
# Beyond the max_columns boundary, but no expanded repr
# since the width is not exceeded
self.assertFalse(has_expanded_repr(df6))
self.assertFalse(has_truncated_repr(df6))
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# exceeding vertical bounds cannot result in an expanded repr
self.assertFalse(has_expanded_repr(df10))
self.assertTrue(has_vertically_truncated_repr(df10))
# width=None in terminal, auto detection
with option_context('display.max_columns', 100,
'display.max_rows', term_width * 20,
'display.width', None):
df = mkframe((term_width // 7) - 2)
self.assertFalse(has_expanded_repr(df))
df = mkframe((term_width // 7) + 2)
com.pprint_thing(df._repr_fits_horizontal_())
self.assertTrue(has_expanded_repr(df))
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame([{'a': 'foo', 'b': 'bar',
'c': 'uncomfortably long line with lots of stuff',
'd': 1},
{'a': 'foo', 'b': 'bar', 'c': 'stuff', 'd': 1}])
df.set_index(['a', 'b', 'c'])
self.assertTrue(str(df) == ' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
self.assertTrue(str(df) == ' a b c d\n'
'0 foo bar uncomfortably lo... 1\n'
'1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context('mode.sim_interactive', True):
with option_context('max_rows',None):
with option_context('max_columns',None):
# Wrap around with None
self.assertTrue(has_expanded_repr(df))
with option_context('max_rows',0):
with option_context('max_columns',0):
# Truncate with auto detection.
self.assertTrue(has_horizontally_truncated_repr(df))
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows',0):
with option_context('max_columns',None):
# Wrap around with None
self.assertTrue(has_expanded_repr(df))
# Truncate vertically
self.assertTrue(has_vertically_truncated_repr(df))
with option_context('max_rows',None):
with option_context('max_columns',0):
self.assertTrue(has_horizontally_truncated_repr(df))
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = [u('\u03c3')] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', u('\u03c3a'), 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except:
pass
if not line.startswith('dtype:'):
self.assertEqual(len(line), line_len)
# it works even if sys.stdin is None
_stdin= sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = self.frame.to_string()
tm.assertIsInstance(result, compat.text_type)
def test_to_string_utf8_columns(self):
n = u("\u05d0").encode('utf-8')
with option_context('display.max_rows', 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({u('c/\u03c3'): []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x%x' % x),
('float', lambda x: '[% 4.1f]' % x),
('object', lambda x: '-%s-' % str(x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=lzip(*formatters)[1])
self.assertEqual(result, (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-'))
self.assertEqual(result, result2)
def test_to_string_with_formatters_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
result = df.to_string(formatters={u('c/\u03c3'):
lambda x: '%s' % x})
self.assertEqual(result, u(' c/\u03c3\n') +
'0 1\n1 2\n2 3')
def test_east_asian_unicode_frame(self):
if PY3:
_rep = repr
else:
_rep = unicode
# not aligned properly because of East Asian width
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
self.assertEqual(_rep(df), expected)
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ")
self.assertEqual(_rep(df), expected)
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
expected = (u" a b\nおおおお \nあ あああああ あ\n"
u"い い いいい\nうう う う\nえ えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
expected = (u" あああ いいいいい\nお \nあ あああ あ\n"
u"いいい い いいい\nうう う う\nえ えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
expected = (u" a b\nあ いい あああああ あ\n"
u"う え い いいい\nおおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u".. ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# mid col
df = DataFrame({'a': [u'あ', u'いいい', u'う', u'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na 1 あ\n"
u"bb 222 いいい\nc 33333 う\n"
u"ddd 4 ええええええ")
self.assertEqual(_rep(df), expected)
# all col
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" a b\na あああああ あ\n"
u"bb い いいい\nc う う\n"
u"ddd えええ ええええええ""")
self.assertEqual(_rep(df), expected)
# column name
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc う 33333\n"
u"ddd ええええええ 4")
self.assertEqual(_rep(df), expected)
# index
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=[u'あああ', u'いいいいいい', u'うう', u'え'])
expected = (u" a b\nあああ あああああ あ\n"
u"いいいいいい い いいい\nうう う う\n"
u"え えええ ええええええ")
self.assertEqual(_rep(df), expected)
# index name
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']},
index=pd.Index([u'あ', u'い', u'うう', u'え'], name=u'おおおお'))
expected = (u" a b\nおおおお \n"
u"あ あああああ あ\nい い いいい\n"
u"うう う う\nえ えええ ええええええ")
self.assertEqual(_rep(df), expected)
# all
df = DataFrame({u'あああ': [u'あああ', u'い', u'う', u'えええええ'],
u'いいいいい': [u'あ', u'いいい', u'う', u'ええ']},
index=pd.Index([u'あ', u'いいい', u'うう', u'え'], name=u'お'))
expected = (u" あああ いいいいい\nお \n"
u"あ あああ あ\nいいい い いいい\n"
u"うう う う\nえ えええええ ええ")
self.assertEqual(_rep(df), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
df = DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ']}, index=idx)
expected = (u" a b\nあ いい あああああ あ\n"
u"う え い いいい\nおおお かかかか う う\n"
u"き くく えええ ええええええ")
self.assertEqual(_rep(df), expected)
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': [u'あああああ', u'い', u'う', u'えええ'],
'b': [u'あ', u'いいい', u'う', u'ええええええ'],
'c': [u'お', u'か', u'ききき', u'くくくくくく'],
u'ああああ': [u'さ', u'し', u'す', u'せ']},
columns=['a', 'b', 'c', u'ああああ'])
expected = (u" a ... ああああ\n0 あああああ ... さ\n"
u".. ... ... ...\n3 えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
df.index = [u'あああ', u'いいいい', u'う', 'aaa']
expected = (u" a ... ああああ\nあああ あああああ ... さ\n"
u"... ... ... ...\naaa えええ ... せ\n"
u"\n[4 rows x 4 columns]")
self.assertEqual(_rep(df), expected)
# ambiguous unicode
df = DataFrame({u'あああああ': [1, 222, 33333, 4],
'b': [u'あ', u'いいい', u'¡¡', u'ええええええ']},
index=['a', 'bb', 'c', '¡¡¡'])
expected = (u" b あああああ\na あ 1\n"
u"bb いいい 222\nc ¡¡ 33333\n"
u"¡¡¡ ええええええ 4")
self.assertEqual(_rep(df), expected)
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({u('c/\u03c3'): Series()})
nonempty = DataFrame({u('c/\u03c3'): Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
self.assertTrue(c10 < c20 < c30)
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
self.assertEqual(len(with_header_row1), len(no_header))
def test_to_string_truncate_indices(self):
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
for column in [ tm.makeStringIndex ]:
for h in [10,20]:
for w in [10,20]:
with option_context("display.expand_frame_repr",False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
self.assertTrue(has_vertically_truncated_repr(df))
else:
self.assertFalse(has_vertically_truncated_repr(df))
with option_context("display.max_columns", 15):
if w == 20:
self.assertTrue(has_horizontally_truncated_repr(df))
else:
self.assertFalse(has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,"display.max_columns", 15):
if h == 20 and w == 20:
self.assertTrue(has_doubly_truncated_repr(df))
else:
self.assertFalse(has_doubly_truncated_repr(df))
def test_to_string_truncate_multilevel(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
with option_context("display.max_rows", 7,"display.max_columns", 7):
self.assertTrue(has_doubly_truncated_repr(df))
def test_to_html_with_col_space(self):
def check_with_width(df, col_space):
import re
# check that col_space affects HTML generation
# and be very brittle about it.
html = df.to_html(col_space=col_space)
hdrs = [x for x in html.split("\n") if re.search("<th[>\s]", x)]
self.assertTrue(len(hdrs) > 0)
for h in hdrs:
self.assertTrue("min-width" in h)
self.assertTrue(str(col_space) in h)
df = DataFrame(np.random.random(size=(1, 3)))
check_with_width(df, 30)
check_with_width(df, 50)
def test_to_html_with_empty_string_label(self):
# GH3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
res = df.to_html()
self.assertTrue("rowspan" not in res)
def test_to_html_unicode(self):
df = DataFrame({u('\u03c3'): np.arange(10.)})
expected = u'<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>\u03c3</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>1</td>\n </tr>\n <tr>\n <th>2</th>\n <td>2</td>\n </tr>\n <tr>\n <th>3</th>\n <td>3</td>\n </tr>\n <tr>\n <th>4</th>\n <td>4</td>\n </tr>\n <tr>\n <th>5</th>\n <td>5</td>\n </tr>\n <tr>\n <th>6</th>\n <td>6</td>\n </tr>\n <tr>\n <th>7</th>\n <td>7</td>\n </tr>\n <tr>\n <th>8</th>\n <td>8</td>\n </tr>\n <tr>\n <th>9</th>\n <td>9</td>\n </tr>\n </tbody>\n</table>'
self.assertEqual(df.to_html(), expected)
df = DataFrame({'A': [u('\u03c3')]})
expected = u'<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>A</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>\u03c3</td>\n </tr>\n </tbody>\n</table>'
self.assertEqual(df.to_html(), expected)
def test_to_html_escaped(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<type 'str'>",
b: "<type 'str'>"},
'co>l2':{a: "<type 'str'>",
b: "<type 'str'>"}}
rs = DataFrame(test_dict).to_html()
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &amp;</th>
<td><type 'str'></td>
<td><type 'str'></td>
</tr>
<tr>
<th>stri>ng2 &amp;</th>
<td><type 'str'></td>
<td><type 'str'></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_escape_disabled(self):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: "<b>bold</b>",
b: "<b>bold</b>"},
'co>l2': {a: "<b>bold</b>",
b: "<b>bold</b>"}}
rs = DataFrame(test_dict).to_html(escape=False)
xp = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>co<l1</th>
<th>co>l2</th>
</tr>
</thead>
<tbody>
<tr>
<th>str<ing1 &</th>
<td><b>bold</b></td>
<td><b>bold</b></td>
</tr>
<tr>
<th>stri>ng2 &</th>
<td><b>bold</b></td>
<td><b>bold</b></td>
</tr>
</tbody>
</table>"""
self.assertEqual(xp, rs)
def test_to_html_multiindex_index_false(self):
# issue 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)}
)
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
result = df.to_html(index=False)
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th colspan="2" halign="left">a</th>
<th colspan="2" halign="left">b</th>
</tr>
<tr>
<th>c</th>
<th>d</th>
<th>c</th>
<th>d</th>
</tr>
</thead>
<tbody>
<tr>
<td>0</td>
<td>3</td>
<td>5</td>
<td>3</td>
</tr>
<tr>
<td>1</td>
<td>4</td>
<td>6</td>
<td>4</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
self.assertEqual(result, expected)
def test_to_html_multiindex_sparsify_false_multi_sparse(self):
with option_context('display.multi_sparse', False):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=index[::2], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_multiindex_sparsify(self):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
result = df.to_html()
expected = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=index[::2], index=index)
result = df.to_html()
expected = """\
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th>foo</th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th></th>
<th></th>
<th>0</th>
<th>0</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th rowspan="2" valign="top">1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_index_formatter(self):
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4))
f = lambda x: 'abcd'[x]
result = df.to_html(formatters={'__index__': f})
expected = """\
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>foo</th>
<th>None</th>
</tr>
</thead>
<tbody>
<tr>
<th>a</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>b</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>c</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>d</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table>"""
self.assertEqual(result, expected)
def test_to_html_regression_GH6098(self):
df = DataFrame({u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')],
u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_()
def test_to_html_truncate(self):
raise nose.SkipTest("unreliable on travis")
index = pd.DatetimeIndex(start='20010101',freq='D',periods=20)
df = DataFrame(index=index,columns=range(20))
fmt.set_option('display.max_rows',8)
fmt.set_option('display.max_columns',4)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>...</th>
<th>18</th>
<th>19</th>
</tr>
</thead>
<tbody>
<tr>
<th>2001-01-01</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-02</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-03</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-04</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>2001-01-17</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-18</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-19</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>2001-01-20</th>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>20 rows × 20 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_to_html_truncate_multi_index(self):
raise nose.SkipTest("unreliable on travis")
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
fmt.set_option('display.max_rows',7)
fmt.set_option('display.max_columns',7)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th></th>
<th colspan="2" halign="left">bar</th>
<th>baz</th>
<th>...</th>
<th>foo</th>
<th colspan="2" halign="left">qux</th>
</tr>
<tr>
<th></th>
<th></th>
<th>one</th>
<th>two</th>
<th>one</th>
<th>...</th>
<th>two</th>
<th>one</th>
<th>two</th>
</tr>
</thead>
<tbody>
<tr>
<th rowspan="2" valign="top">bar</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>baz</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>...</th>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>foo</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th rowspan="2" valign="top">qux</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_to_html_truncate_multi_index_sparse_off(self):
raise nose.SkipTest("unreliable on travis")
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays,columns=arrays)
fmt.set_option('display.max_rows',7)
fmt.set_option('display.max_columns',7)
fmt.set_option('display.multi_sparse',False)
result = df._repr_html_()
expected = '''\
<div{0}>
<table border="1" class="dataframe">
<thead>
<tr>
<th></th>
<th></th>
<th>bar</th>
<th>bar</th>
<th>baz</th>
<th>...</th>
<th>foo</th>
<th>qux</th>
<th>qux</th>
</tr>
<tr>
<th></th>
<th></th>
<th>one</th>
<th>two</th>
<th>one</th>
<th>...</th>
<th>two</th>
<th>one</th>
<th>two</th>
</tr>
</thead>
<tbody>
<tr>
<th>bar</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>bar</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>baz</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>foo</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>qux</th>
<th>one</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>qux</th>
<th>two</th>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>...</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>8 rows × 8 columns</p>
</div>'''.format(div_style)
if compat.PY2:
expected = expected.decode('utf-8')
self.assertEqual(result, expected)
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split('\n')
self.assertEqual(len(lines[1]), len(lines[2]))
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({u('c/\u03c3'): Series({'test': np.NaN})})
compat.text_type(dm.to_string())
def test_string_repr_encoding(self):
filepath = tm.get_data_path('unicode_series.csv')
df = pd.read_csv(filepath, header=None, encoding='latin1')
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': np.inf * np.empty(10)})
repr(df)
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
fmt.set_option('display.max_rows', 200)
def test_pprint_thing(self):
from pandas.core.common import pprint_thing as pp_t
if PY3:
raise nose.SkipTest("doesn't work on Python 3")
self.assertEqual(pp_t('a') , u('a'))
self.assertEqual(pp_t(u('a')) , u('a'))
self.assertEqual(pp_t(None) , 'None')
self.assertEqual(pp_t(u('\u05d0'), quote_strings=True),
u("u'\u05d0'"))
self.assertEqual(pp_t(u('\u05d0'), quote_strings=False),
u('\u05d0'))
self.assertEqual(pp_t((u('\u05d0'),
u('\u05d1')), quote_strings=True),
u("(u'\u05d0', u'\u05d1')"))
self.assertEqual(pp_t((u('\u05d0'), (u('\u05d1'),
u('\u05d2'))),
quote_strings=True),
u("(u'\u05d0', (u'\u05d1', u'\u05d2'))"))
self.assertEqual(pp_t(('foo', u('\u05d0'), (u('\u05d0'),
u('\u05d0'))),
quote_strings=True),
u("(u'foo', u'\u05d0', (u'\u05d0', u'\u05d0'))"))
# escape embedded tabs in string
# GH #2038
self.assertTrue(not "\t" in pp_t("a\tb", escape_chars=("\t",)))
def test_wide_repr(self):
with option_context('mode.sim_interactive', True, 'display.show_dimensions', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
assert "10 rows x %d columns" % (max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 120):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(randn(5, 3), columns=['a' * 90, 'b' * 90, 'c' * 90])
rep_str = repr(df)
self.assertEqual(len(rep_str.splitlines()), 20)
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
for line in wide_repr.splitlines()[1::13]:
self.assertIn('DataFrame Index', line)
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
with option_context('mode.sim_interactive', True):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
for line in wide_repr.splitlines()[1::13]:
self.assertIn('Level 0 Level 1', line)
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
midx = MultiIndex.from_arrays(
tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(
tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assertNotEqual(rep_str, wide_repr)
with option_context('display.width', 150):
wider_repr = repr(df)
self.assertTrue(len(wider_repr) < len(wide_repr))
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(
{'a': ['a' * 30, 'b' * 30], 'b': ['c' * 70, 'd' * 80]})
result = repr(df)
self.assertTrue('ccccc' in result)
self.assertTrue('ddddd' in result)
def test_long_series(self):
n = 1000
s = Series(np.random.randint(-50,50,n),index=['s%04d' % x for x in range(n)], dtype='int64')
import re
str_rep = str(s)
nmatches = len(re.findall('dtype',str_rep))
self.assertEqual(nmatches, 1)
def test_index_with_nan(self):
# GH 2850
df = DataFrame({'id1': {0: '1a3', 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
'id3': {0: '78d', 1: '79d'}, 'value': {0: 123, 1: 64}})
# multi-index
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# index
y = df.set_index('id2')
result = y.to_string()
expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nd67 9h4 79d 64')
self.assertEqual(result, expected)
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = u(' value\nid1 id2 id3 \n1a3 NaN 78d 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
# all-nan in mi
df2 = df.copy()
df2.ix[:,'id2'] = np.nan
y = df2.set_index('id2')
result = y.to_string()
expected = u(' id1 id3 value\nid2 \nNaN 1a3 78d 123\nNaN 9h4 79d 64')
self.assertEqual(result, expected)
# partial nan in mi
df2 = df.copy()
df2.ix[:,'id2'] = np.nan
y = df2.set_index(['id2','id3'])
result = y.to_string()
expected = u(' id1 value\nid2 id3 \nNaN 78d 1a3 123\n 79d 9h4 64')
self.assertEqual(result, expected)
df = DataFrame({'id1': {0: np.nan, 1: '9h4'}, 'id2': {0: np.nan, 1: 'd67'},
'id3': {0: np.nan, 1: '79d'}, 'value': {0: 123, 1: 64}})
y = df.set_index(['id1','id2','id3'])
result = y.to_string()
expected = u(' value\nid1 id2 id3 \nNaN NaN NaN 123\n9h4 d67 79d 64')
self.assertEqual(result, expected)
def test_to_string(self):
from pandas import read_table
import re
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
tm.assertIsInstance(s, compat.string_types)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
joined = '\n'.join([re.sub('\s+', ' ', x).strip() for x in lines[1:]])
recons = read_table(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
self.assertEqual(recons['A'].count(), biggie['A'].count())
self.assertTrue((np.abs(recons['A'].dropna() -
biggie['A'].dropna()) < 0.1).all())
# expected = ['B', 'A']
# self.assertEqual(header, expected)
result = biggie.to_string(columns=['A'], col_space=17)
header = result.split('\n')[0].strip().split()
expected = ['A']
self.assertEqual(header, expected)
biggie.to_string(columns=['B', 'A'],
formatters={'A': lambda x: '%.1f' % x})
biggie.to_string(columns=['B', 'A'], float_format=str)
biggie.to_string(columns=['B', 'A'], col_space=12,
float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({'x': [1, 2, 3],
'y': [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
self.assertEqual(df_s, expected)
def test_to_string_no_index(self):
df = DataFrame({'x': [1, 2, 3],
'y': [4, 5, 6]})
df_s = df.to_string(index=False)
expected = " x y\n 1 4\n 2 5\n 3 6"
self.assertEqual(df_s, expected)
def test_to_string_float_formatting(self):
self.reset_display_options()
fmt.set_option('display.precision', 5, 'display.column_space',
12, 'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
1.7e+8, 1.253456, np.pi, -1e6]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
'2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
'5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
'8 -1.00000e+006')
else:
expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
'2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
'5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
'8 -1.00000e+06')
self.assertEqual(df_s, expected)
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string()
expected = (' x\n'
'0 3234.000\n'
'1 0.253')
self.assertEqual(df_s, expected)
self.reset_display_options()
self.assertEqual(get_option("display.precision"), 6)
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
# Python 2.5 just wants me to be sad. And debian 32-bit
# sys.version_info[0] == 2 and sys.version_info[1] < 6:
if _three_digit_exp():
expected = (' x\n'
'0 1.000000e+009\n'
'1 2.512000e-001')
else:
expected = (' x\n'
'0 1.000000e+09\n'
'1 2.512000e-01')
self.assertEqual(df_s, expected)
def test_to_string_small_float_values(self):
df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if '%.4g' % 1.7e8 == '1.7e+008':
expected = (' a\n'
'0 1.500000e+000\n'
'1 1.000000e-017\n'
'2 -5.500000e-007')
else:
expected = (' a\n'
'0 1.500000e+00\n'
'1 1.000000e-17\n'
'2 -5.500000e-07')
self.assertEqual(result, expected)
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = (' 0\n'
'0 0\n'
'1 0\n'
'2 -0')
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(lrange(5), index=index)
result = df.to_string()
expected = (' 0\n'
'1.5 0\n'
'2.0 1\n'
'3.0 2\n'
'4.0 3\n'
'5.0 4')
self.assertEqual(result, expected)
def test_to_string_ascii_error(self):
data = [('0 ',
u(' .gitignore '),
u(' 5 '),
' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
'\xa2\xe2\x80\xa2\xe2\x80\xa2')]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
self.assertTrue(issubclass(df['x'].dtype.type, np.integer))
output = df.to_string()
expected = (' x\n'
'0 -15\n'
'1 20\n'
'2 25\n'
'3 -35')
self.assertEqual(output, expected)
def test_to_string_index_formatter(self):
df = DataFrame([lrange(5), lrange(5, 10), lrange(10, 15)])
rs = df.to_string(formatters={'__index__': lambda x: 'abc'[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
self.assertEqual(rs, xp)
def test_to_string_left_justify_cols(self):
self.reset_display_options()
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n'
'0 3234.000\n'
'1 0.253')
self.assertEqual(df_s, expected)
def test_to_string_format_na(self):
self.reset_display_options()
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0000 foo\n'
'2 -2.1234 foooo\n'
'3 3.0000 fooooo\n'
'4 4.0000 bar')
self.assertEqual(result, expected)
df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1 foo\n'
'2 -2 foooo\n'
'3 3 fooooo\n'
'4 4 bar')
self.assertEqual(result, expected)
def test_to_string_line_width(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
s = df.to_string(line_width=80)
self.assertEqual(max(len(l) for l in s.split('\n')), 80)
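# 'display.show_dimensions' accepts True, False, or 'truncate'; with
# 'truncate' the "N rows x M columns" footer should only appear when the
# frame is actually truncated by max_rows / max_columns, as checked below.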
def test_show_dimensions(self):
df = DataFrame(123, lrange(10, 15), lrange(30))
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', True):
self.assertTrue('5 rows' in str(df))
self.assertTrue('5 rows' in df._repr_html_())
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', False):
self.assertFalse('5 rows' in str(df))
self.assertFalse('5 rows' in df._repr_html_())
with option_context('display.max_rows', 2, 'display.max_columns', 2, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 'truncate'):
self.assertTrue('5 rows' in str(df))
self.assertTrue('5 rows' in df._repr_html_())
with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width',
500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 'truncate'):
self.assertFalse('5 rows' in str(df))
self.assertFalse('5 rows' in df._repr_html_())
def test_to_html(self):
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
s = biggie.to_html()
buf = StringIO()
retval = biggie.to_html(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue(), s)
tm.assertIsInstance(s, compat.string_types)
biggie.to_html(columns=['B', 'A'], col_space=17)
biggie.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '%.1f' % x})
biggie.to_html(columns=['B', 'A'], float_format=str)
biggie.to_html(columns=['B', 'A'], col_space=12,
float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_html()
def test_to_html_filename(self):
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
with tm.ensure_clean('test.html') as path:
biggie.to_html(path)
with open(path, 'r') as f:
s = biggie.to_html()
s2 = f.read()
self.assertEqual(s, s2)
frame = DataFrame(index=np.arange(200))
with tm.ensure_clean('test.html') as path:
frame.to_html(path)
with open(path, 'r') as f:
self.assertEqual(frame.to_html(), f.read())
def test_to_html_with_no_bold(self):
x = DataFrame({'x': randn(5)})
ashtml = x.to_html(bold_rows=False)
self.assertFalse('<strong' in ashtml[ashtml.find("</thead>"):])
def test_to_html_columns_arg(self):
result = self.frame.to_html(columns=['A'])
self.assertNotIn('<th>B</th>', result)
def test_to_html_multiindex(self):
columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2),
np.mod(lrange(4), 2))),
names=['CL0', 'CL1'])
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify='left')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr>\n'
' <th>CL0</th>\n'
' <th colspan="2" halign="left">0</th>\n'
' <th colspan="2" halign="left">1</th>\n'
' </tr>\n'
' <tr>\n'
' <th>CL1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>a</td>\n'
' <td>b</td>\n'
' <td>c</td>\n'
' <td>d</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>e</td>\n'
' <td>f</td>\n'
' <td>g</td>\n'
' <td>h</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
columns = MultiIndex.from_tuples(list(zip(range(4),
np.mod(lrange(4), 2))))
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify='right')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr>\n'
' <th></th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>2</th>\n'
' <th>3</th>\n'
' </tr>\n'
' <tr>\n'
' <th></th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' <th>0</th>\n'
' <th>1</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>a</td>\n'
' <td>b</td>\n'
' <td>c</td>\n'
' <td>d</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>e</td>\n'
' <td>f</td>\n'
' <td>g</td>\n'
' <td>h</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
def test_to_html_justify(self):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify='left')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: left;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>6</td>\n'
' <td>1</td>\n'
' <td>223442</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>30000</td>\n'
' <td>2</td>\n'
' <td>0</td>\n'
' </tr>\n'
' <tr>\n'
' <th>2</th>\n'
' <td>2</td>\n'
' <td>70000</td>\n'
' <td>1</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
result = df.to_html(justify='right')
expected = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>0</th>\n'
' <td>6</td>\n'
' <td>1</td>\n'
' <td>223442</td>\n'
' </tr>\n'
' <tr>\n'
' <th>1</th>\n'
' <td>30000</td>\n'
' <td>2</td>\n'
' <td>0</td>\n'
' </tr>\n'
' <tr>\n'
' <th>2</th>\n'
' <td>2</td>\n'
' <td>70000</td>\n'
' <td>1</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(result, expected)
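# to_html(index=False) must drop the index column entirely; the same
# index-less HTML is expected whether the index is unnamed, named, or a
# MultiIndex.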
def test_to_html_index(self):
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.NaN]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>foo</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>baz</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
expected_without_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
result = df.to_html(index=False)
for i in index:
self.assertNotIn(i, result)
self.assertEqual(result, expected_without_index)
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' <tr>\n'
' <th>idx</th>\n'
' <th></th>\n'
' <th></th>\n'
' <th></th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th>foo</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>baz</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
self.assertEqual(df.to_html(index=False), expected_without_index)
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th rowspan="2" valign="top">foo</th>\n'
' <th>car</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bike</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <th>car</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
self.assertNotIn(i, result)
# must be the same result as normal index
self.assertEqual(result, expected_without_index)
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = ('<table border="1" class="dataframe">\n'
' <thead>\n'
' <tr style="text-align: right;">\n'
' <th></th>\n'
' <th></th>\n'
' <th>A</th>\n'
' <th>B</th>\n'
' <th>C</th>\n'
' </tr>\n'
' <tr>\n'
' <th>idx1</th>\n'
' <th>idx2</th>\n'
' <th></th>\n'
' <th></th>\n'
' <th></th>\n'
' </tr>\n'
' </thead>\n'
' <tbody>\n'
' <tr>\n'
' <th rowspan="2" valign="top">foo</th>\n'
' <th>car</th>\n'
' <td>1</td>\n'
' <td>1.2</td>\n'
' <td>one</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bike</th>\n'
' <td>2</td>\n'
' <td>3.4</td>\n'
' <td>two</td>\n'
' </tr>\n'
' <tr>\n'
' <th>bar</th>\n'
' <th>car</th>\n'
' <td>3</td>\n'
' <td>5.6</td>\n'
' <td>NaN</td>\n'
' </tr>\n'
' </tbody>\n'
'</table>')
self.assertEqual(df.to_html(), expected_with_index)
self.assertEqual(df.to_html(index=False), expected_without_index)
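# _repr_html_ is the hook IPython/Jupyter uses for rich display; the tests
# below check that display options (max_rows, max_columns,
# notebook_repr_html, show_dimensions) are honoured by that output.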
def test_repr_html(self):
self.frame._repr_html_()
fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
self.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
self.assertTrue('2 rows' in df._repr_html_())
fmt.set_option('display.show_dimensions', False)
self.assertFalse('2 rows' in df._repr_html_())
self.reset_display_options()
def test_repr_html_wide(self):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
reg_repr = df._repr_html_()
assert "..." not in reg_repr
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
wide_repr = wide_df._repr_html_()
assert "..." in wide_repr
def test_repr_html_wide_multiindex_cols(self):
max_cols = get_option('display.max_columns')
mcols = MultiIndex.from_product([np.arange(max_cols//2),
['foo', 'bar']],
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
reg_repr = df._repr_html_()
assert '...' not in reg_repr
mcols = MultiIndex.from_product((np.arange(1+(max_cols//2)),
['foo', 'bar']),
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
wide_repr = df._repr_html_()
assert '...' in wide_repr
def test_repr_html_long(self):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)})
long_repr = df._repr_html_()
assert '..' in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_float(self):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(40 + h) in reg_repr
h = max_rows + 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
long_repr = df._repr_html_()
assert '..' in long_repr
assert '31' not in long_repr
assert u('%d rows ') % h in long_repr
assert u('2 columns') in long_repr
def test_repr_html_long_multiindex(self):
max_rows = get_option('display.max_rows')
max_L1 = max_rows//2
tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn(max_L1*2, 2), index=idx,
columns=['A', 'B'])
reg_repr = df._repr_html_()
assert '...' not in reg_repr
tuples = list(itertools.product(np.arange(max_L1+1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn((max_L1+1)*2, 2), index=idx,
columns=['A', 'B'])
long_repr = df._repr_html_()
assert '...' in long_repr
def test_repr_html_long_and_wide(self):
max_cols = get_option('display.max_columns')
max_rows = get_option('display.max_rows')
h, w = max_rows-1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '...' not in df._repr_html_()
h, w = max_rows+1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '...' in df._repr_html_()
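# With 'display.large_repr' set to 'info', frames that would otherwise be
# truncated fall back to an info()-style summary instead of a truncated
# table, for both long and wide frames.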
def test_info_repr(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
# Long
h, w = max_rows+1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert has_vertically_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
# Wide
h, w = max_rows-1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert has_horizontally_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(randn(10, 5))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
self.assertTrue(has_non_verbose_info_repr(df))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 5):
self.assertFalse(has_non_verbose_info_repr(df))
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = get_option('display.max_rows')
max_cols = get_option('display.max_columns')
# Long
h, w = max_rows+1, max_cols-1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert r'<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert r'<class' in df._repr_html_()
# Wide
h, w = max_rows-1, max_cols+1
df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w)))
assert '<class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert '<class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self):
def get_ipython():
return {'config':
{'KernelApp':
{'parent_appname': 'ipython-qtconsole'}}}
repstr = self.frame._repr_html_()
self.assertIsNotNone(repstr)
fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
self.assertIn('class', repstr) # info fallback
self.reset_display_options()
def test_to_html_with_classes(self):
df = DataFrame()
result = df.to_html(classes="sortable draggable")
expected = dedent("""
<table border="1" class="dataframe sortable draggable">
<thead>
<tr style="text-align: right;">
<th></th>
</tr>
</thead>
<tbody>
</tbody>
</table>
""").strip()
self.assertEqual(result, expected)
result = df.to_html(classes=["sortable", "draggable"])
self.assertEqual(result, expected)
def test_pprint_pathological_object(self):
"""
If the test fails, the stack will overflow and nose will crash,
but it won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df)  # just don't die
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
skip = True
for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
if line.startswith('dtype:'):
continue
if _three_digit_exp():
self.assertTrue(('+010' in line) or skip)
else:
self.assertTrue(('+10' in line) or skip)
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
self.assertTrue("'a': 1" in val)
self.assertTrue("'b': 2" in val)
def test_to_latex_filename(self):
with tm.ensure_clean('test.tex') as path:
self.frame.to_latex(path)
with open(path, 'r') as f:
self.assertEqual(self.frame.to_latex(), f.read())
def test_to_latex(self):
# it works!
self.frame.to_latex()
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex()
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
a & b \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_latex_format(self):
# GH Bug #9402
self.frame.to_latex(column_format='ccc')
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(column_format='ccc')
withindex_expected = r"""\begin{tabular}{ccc}
\toprule
{} & a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
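# MultiIndex columns are written as stacked header rows, and repeated outer
# row labels are left blank in the LaTeX body; see the expected tabular
# blocks below.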
def test_to_latex_multiindex(self):
df = DataFrame({('x', 'y'): ['a']})
result = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & x \\
{} & y \\
\midrule
0 & a \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
result = df.T.to_latex()
expected = r"""\begin{tabular}{lll}
\toprule
& & 0 \\
\midrule
x & y & a \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
df = DataFrame.from_dict({
('c1', 0): pd.Series(dict((x, x) for x in range(4))),
('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c2', 0): pd.Series(dict((x, x) for x in range(4))),
('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))),
('c3', 0): pd.Series(dict((x, x) for x in range(4))),
}).T
result = df.to_latex()
expected = r"""\begin{tabular}{llrrrr}
\toprule
& & 0 & 1 & 2 & 3 \\
\midrule
c1 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c2 & 0 & 0 & 1 & 2 & 3 \\
& 1 & 4 & 5 & 6 & 7 \\
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
# GH 10660
df = pd.DataFrame({'a':[0,0,1,1], 'b':list('abab'), 'c':[1,2,3,4]})
result = df.set_index(['a', 'b']).to_latex()
expected = r"""\begin{tabular}{llr}
\toprule
& & c \\
a & b & \\
\midrule
0 & a & 1 \\
& b & 2 \\
1 & a & 3 \\
& b & 4 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
result = df.groupby('a').describe().to_latex()
expected = r"""\begin{tabular}{llr}
\toprule
& & c \\
a & {} & \\
\midrule
0 & count & 2.000000 \\
& mean & 1.500000 \\
& std & 0.707107 \\
& min & 1.000000 \\
& 25\% & 1.250000 \\
& 50\% & 1.500000 \\
& 75\% & 1.750000 \\
& max & 2.000000 \\
1 & count & 2.000000 \\
& mean & 3.500000 \\
& std & 0.707107 \\
& min & 3.000000 \\
& 25\% & 3.250000 \\
& 50\% & 3.500000 \\
& 75\% & 3.750000 \\
& max & 4.000000 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(result, expected)
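# to_latex escapes LaTeX special characters by default (escape=True), while
# escape=False emits labels verbatim; e.g. '^' becomes \textasciicircum and
# '$' becomes \$ in the escaped output below.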
def test_to_latex_escape(self):
a = 'a'
b = 'b'
test_dict = {u('co^l1') : {a: "a",
b: "b"},
u('co$e^x$'): {a: "a",
b: "b"}}
unescaped_result = DataFrame(test_dict).to_latex(escape=False)
escaped_result = DataFrame(test_dict).to_latex() # default: escape=True
unescaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co$e^x$ & co^l1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
escaped_expected = r'''\begin{tabular}{lll}
\toprule
{} & co\$e\textasciicircumx\$ & co\textasciicircuml1 \\
\midrule
a & a & a \\
b & b & b \\
\bottomrule
\end{tabular}
'''
self.assertEqual(unescaped_result, unescaped_expected)
self.assertEqual(escaped_result, escaped_expected)
def test_to_latex_longtable(self):
self.frame.to_latex(longtable=True)
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(longtable=True)
withindex_expected = r"""\begin{longtable}{lrl}
\toprule
{} & a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
0 & 1 & b1 \\
1 & 2 & b2 \\
\end{longtable}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False, longtable=True)
withoutindex_expected = r"""\begin{longtable}{rl}
\toprule
a & b \\
\midrule
\endhead
\midrule
\multicolumn{3}{r}{{Continued on next page}} \\
\midrule
\endfoot
\bottomrule
\endlastfoot
1 & b1 \\
2 & b2 \\
\end{longtable}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
def test_to_latex_escape_special_chars(self):
special_characters = ['&','%','$','#','_',
'{','}','~','^','\\']
df = DataFrame(data=special_characters)
observed = df.to_latex()
expected = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & \& \\
1 & \% \\
2 & \$ \\
3 & \# \\
4 & \_ \\
5 & \{ \\
6 & \} \\
7 & \textasciitilde \\
8 & \textasciicircum \\
9 & \textbackslash \\
\bottomrule
\end{tabular}
"""
self.assertEqual(observed, expected)
def test_to_latex_no_header(self):
# GH 7124
df = DataFrame({'a': [1, 2],
'b': ['b1', 'b2']})
withindex_result = df.to_latex(header=False)
withindex_expected = r"""\begin{tabular}{lrl}
\toprule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withindex_result, withindex_expected)
withoutindex_result = df.to_latex(index=False, header=False)
withoutindex_expected = r"""\begin{tabular}{rl}
\toprule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
self.assertEqual(withoutindex_result, withoutindex_expected)
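# quoting=1 is csv.QUOTE_ALL and quoting=3 is csv.QUOTE_NONE (stdlib csv
# constants), so every field in the first expected output below is quoted;
# quotechar swaps the quoting character and must not be None.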
def test_to_csv_quotechar(self):
df = DataFrame({'col' : [1,2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$", engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None, engine='python')
def test_to_csv_doublequote(self):
df = DataFrame({'col' : ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True, engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
with tm.ensure_clean('test.csv') as path:
with tm.assertRaisesRegexp(Error, 'escapechar'):
df.to_csv(path, doublequote=False, engine='python')
def test_to_csv_escapechar(self):
df = DataFrame({'col' : ['a"a', '"bb"']})
expected = """\
"","col"
"0","a\\"a"
"1","\\"bb\\""
"""
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\',
engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
df = DataFrame({'col' : ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\', engine='python')
with open(path, 'r') as f:
self.assertEqual(f.read(), expected)
def test_csv_to_string(self):
df = DataFrame({'col' : [1,2]})
expected = ',col\n0,1\n1,2\n'
self.assertEqual(df.to_csv(), expected)
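# GH 781: combining 'decimal' and 'sep' gives the common European CSV
# style. Illustrative (matches the pattern asserted below):
#   DataFrame({'x': [10.1]}).to_csv(decimal=',', sep=';')  ->  ';x\n0;10,1\n'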
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
self.assertEqual(df.to_csv(), expected_default)
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
self.assertEqual(df.to_csv(decimal=',',sep=';'), expected_european_excel)
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
self.assertEqual(df.to_csv(float_format = '%.2f'), expected_float_format_default)
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
self.assertEqual(df.to_csv(decimal=',',sep=';', float_format = '%.2f'), expected_float_format)
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101',periods=5,freq='s')})
df_day = DataFrame({'A': pd.date_range('20130101',periods=5,freq='d')})
expected_default_sec = ',A\n0,2013-01-01 00:00:00\n1,2013-01-01 00:00:01\n2,2013-01-01 00:00:02' + \
'\n3,2013-01-01 00:00:03\n4,2013-01-01 00:00:04\n'
self.assertEqual(df_sec.to_csv(), expected_default_sec)
expected_ymdhms_day = ',A\n0,2013-01-01 00:00:00\n1,2013-01-02 00:00:00\n2,2013-01-03 00:00:00' + \
'\n3,2013-01-04 00:00:00\n4,2013-01-05 00:00:00\n'
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S'), expected_ymdhms_day)
expected_ymd_sec = ',A\n0,2013-01-01\n1,2013-01-01\n2,2013-01-01\n3,2013-01-01\n4,2013-01-01\n'
self.assertEqual(df_sec.to_csv(date_format='%Y-%m-%d'), expected_ymd_sec)
expected_default_day = ',A\n0,2013-01-01\n1,2013-01-02\n2,2013-01-03\n3,2013-01-04\n4,2013-01-05\n'
self.assertEqual(df_day.to_csv(), expected_default_day)
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d'), expected_default_day)
# testing if date_format parameter is taken into account for
# multi-indexed dataframes (GH 7791)
df_sec['B'] = 0
df_sec['C'] = 1
expected_ymd_sec = 'A,B,C\n2013-01-01,0,1\n'
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
self.assertEqual(
df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d'),
expected_ymd_sec
)
# deprecation GH11274
def test_to_csv_engine_kw_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
df.to_csv(engine='python')
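# DataFrame.round accepts an int (applied to every column), a dict, or a
# Series mapping column names to decimal places; unknown columns are
# ignored and non-integer decimals raise TypeError. Illustrative:
#   DataFrame({'a': [1.234]}).round({'a': 2})   # -> a == 1.23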
def test_round_dataframe(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with self.assertRaises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(
df.round(partial_round_dict), expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(
df.round(wrong_round_dict), expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
if sys.version < LooseVersion('2.7'):
# np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
df.round(non_int_round_dict)
else:
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1':[110., 210, 310], 'col2':[100., 200, 300]})
tm.assert_frame_equal(
big_df.round(negative_round_dict), expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2':1})
expected_nan_round = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
if sys.version < LooseVersion('2.7'):
# Rounding with decimal is a ValueError in Python < 2.7
with self.assertRaises(ValueError):
df.round(nan_round_Series)
else:
with self.assertRaises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
self.assertTrue(rounded.index.equals(dfs.index))
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
self.assertRaises(ValueError, df.round, decimals)
class TestSeriesFormatting(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series([u('\u03c3')] * 10)
repr(s)
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assertIsNone(retval)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')[:-1]]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, 'Series([], Freq: B)')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, 'Series([], Freq: B)')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Freq: B, Name: foo, Length: %d, dtype: float64" % len(cp))
def test_freq_name_separation(self):
s = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
self.assertTrue('Freq: D, Name: 0' in result)
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = (u('0 foo\n') +
u('1 NaN\n') +
u('2 -1.23\n') +
u('3 4.56'))
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') +
'1 NaN\n' +
'2 bar\n' +
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = (u('0 foo\n') +
'1 5\n' +
'2 bar\n' +
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = (u('0 NaN\n') +
'1 1.5678\n' +
'2 NaN\n' +
'3 -3.0000\n' +
'4 NaN')
self.assertEqual(result, expected)
def test_unicode_name_in_footer(self):
s = Series([1, 2], name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf = fmt.SeriesFormatter(s, name=u('\u05e2\u05d1\u05e8\u05d9\u05ea'))
sf._get_footer() # should not raise exception
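# The repr of Series containing East Asian (double-width) characters is
# checked twice below: once with default settings, and once with
# 'display.unicode.east_asian_width' enabled, which counts wide characters
# as two columns so output lines up in East Asian terminals.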
def test_east_asian_unicode_series(self):
if PY3:
_rep = repr
else:
_rep = unicode
# not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\nAB 22\n"
u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=[u'あ', u'いい', u'ううう', u'ええええ'])
expected = (u"あ a\nいい bb\nううう CCC\n"
u"ええええ D\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode values
s = Series([u'あ', u'いい', u'ううう', u'ええええ'], index=['a', 'bb', 'c', 'ddd'])
expected = (u"a あ\nbb いい\nc ううう\n"
u"ddd ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# both
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'])
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
# unicode footer
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
index=[u'ああ', u'いいいい', u'う', u'えええ'],
name=u'おおおおおおお')
expected = (u"ああ あ\nいいいい いい\nう ううう\n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# MultiIndex
idx = pd.MultiIndex.from_tuples([(u'あ', u'いい'), (u'う', u'え'),
(u'おおお', u'かかかか'), (u'き', u'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = (u"あ いい 1\nう え 22\nおおお かかかか 3333\n"
u"き くく 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, u'あああ'])
expected = (u"1 1\nAB 22\nNaN 3333\n"
u"あああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), u'あああ'])
expected = (u"1 1\nAB 22\n"
u"2011-01-01 00:00:00 3333\nあああ 44444\ndtype: int64")
self.assertEqual(_rep(s), expected)
# truncate
with option_context('display.max_rows', 3):
s = Series([u'あ', u'いい', u'ううう', u'ええええ'],
name=u'おおおおおおお')
expected = (u"0 あ\n ... \n"
u"3 ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
s.index = [u'ああ', u'いいいい', u'う', u'えええ']
expected = (u"ああ あ\n ... \n"
u"えええ ええええ\nName: おおおおおおお, dtype: object")
self.assertEqual(_rep(s), expected)
# ambiguous unicode
s = Series([u'¡¡', u'い¡¡', u'ううう', u'ええええ'],
index=[u'ああ', u'¡¡¡¡いい', u'¡¡', u'えええ'])
expected = (u"ああ ¡¡\n¡¡¡¡いい い¡¡\n¡¡ ううう\n"
u"えええ ええええ\ndtype: object")
self.assertEqual(_rep(s), expected)
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
for line in repr(Series(vals)).split('\n'):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
self.assertIn('+010', line)
else:
self.assertIn('+10', line)
def test_datetimeindex(self):
index = date_range('20130102',periods=6)
s = Series(1,index=index)
result = s.to_string()
self.assertTrue('2013-01-02' in result)
# nat in index
s2 = Series(2, index=[ Timestamp('20130111'), NaT ])
s = s2.append(s)
result = s.to_string()
self.assertTrue('NaT' in result)
# nat in summary
result = str(s2.index)
self.assertTrue('NaT' in result)
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
# GH2146
# adding NaTs
y = s-s.shift(1)
result = y.to_string()
self.assertTrue('1 days' in result)
self.assertTrue('00:00:00' not in result)
self.assertTrue('NaT' in result)
# with frac seconds
o = Series([datetime(2012,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +23:59:59.999850' in result)
# rounding?
o = Series([datetime(2012,1,1,1)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +23:00:00' in result)
self.assertTrue('1 days 23:00:00' in result)
o = Series([datetime(2012,1,1,1,1)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +22:59:00' in result)
self.assertTrue('1 days 22:59:00' in result)
o = Series([datetime(2012,1,1,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
self.assertTrue('-1 days +22:58:59.999850' in result)
self.assertTrue('0 days 22:58:59.999850' in result)
# neg time
td = timedelta(minutes=5,seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
self.assertTrue('-1 days +23:54:57' in result)
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
self.assertTrue('2012-01-01 23:59:59.999450' in result)
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days',periods=3))
result = td.to_string()
self.assertEqual(result,u("0 1 days\n1 2 days\n2 3 days"))
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2],
'B': ['2012-01-01', '2012-01-02']})
df['B'] = pd.to_datetime(df.B)
result = repr(df.ix[0])
self.assertTrue('2012-01-01' in result)
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
s = Series(randn(8), index=index)
with option_context("display.max_rows", 10):
self.assertEqual(len(str(s).split('\n')),10)
with option_context("display.max_rows", 3):
self.assertEqual(len(str(s).split('\n')),5)
with option_context("display.max_rows", 2):
self.assertEqual(len(str(s).split('\n')),5)
with option_context("display.max_rows", 1):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 0):
self.assertEqual(len(str(s).split('\n')),10)
# index
s = Series(randn(8), None)
with option_context("display.max_rows", 10):
self.assertEqual(len(str(s).split('\n')),9)
with option_context("display.max_rows", 3):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 2):
self.assertEqual(len(str(s).split('\n')),4)
with option_context("display.max_rows", 1):
self.assertEqual(len(str(s).split('\n')),3)
with option_context("display.max_rows", 0):
self.assertEqual(len(str(s).split('\n')),9)
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1,1,1,1,1,1,1,1,1,1,0.9999,1,1]*10)
with option_context("display.max_rows", 10):
res = repr(s)
exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
'1.0000\n4 1.0000\n ... \n125 '
'1.0000\n126 1.0000\n127 0.9999\n128 '
'1.0000\n129 1.0000\ndtype: float64')
self.assertEqual(res, exp)
@staticmethod
def gen_test_series():
s1 = pd.Series(['a']*100)
s2 = pd.Series(['ab']*100)
s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
s4 = s3[::-1]
test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}
return test_sers
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split('\n')
lines = [line for line in repr(s).split('\n') \
if not re.match('[^\.]*\.+', line)][:-1]
ncolsizes = len(set(len(line.strip()) for line in lines))
self.assertEqual(ncolsizes, 1)
def test_format_explicit(self):
test_sers = self.gen_test_series()
with option_context("display.max_rows", 4):
res = repr(test_sers['onel'])
exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
self.assertEqual(exp, res)
res = repr(test_sers['twol'])
exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
' object')
self.assertEqual(exp, res)
res = repr(test_sers['asc'])
exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
' abcdef\ndtype: object')
self.assertEqual(exp, res)
res = repr(test_sers['desc'])
exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
' a\ndtype: object')
self.assertEqual(exp, res)
def test_ncols(self):
test_sers = self.gen_test_series()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10),dtype='int64')
with option_context("display.max_rows", 1):
strrepr = repr(s).split('\n')
exp1 = ['0', '0']
res1 = strrepr[0].split()
self.assertEqual(exp1, res1)
exp2 = ['..']
res2 = strrepr[1].split()
self.assertEqual(exp2, res2)
def test_truncate_ndots(self):
def getndots(s):
return len(re.match('[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
self.assertEqual(getndots(strrepr), 2)
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
self.assertEqual(getndots(strrepr), 3)
def test_to_string_name(self):
s = Series(range(100),dtype='int64')
s.name = 'myser'
res = s.to_string(max_rows=2, name=True)
exp = '0 0\n ..\n99 99\nName: myser'
self.assertEqual(res, exp)
res = s.to_string(max_rows=2, name=False)
exp = '0 0\n ..\n99 99'
self.assertEqual(res, exp)
def test_to_string_dtype(self):
s = Series(range(100),dtype='int64')
res = s.to_string(max_rows=2, dtype=True)
exp = '0 0\n ..\n99 99\ndtype: int64'
self.assertEqual(res, exp)
res = s.to_string(max_rows=2, dtype=False)
exp = '0 0\n ..\n99 99'
self.assertEqual(res, exp)
def test_to_string_length(self):
s = Series(range(100),dtype='int64')
res = s.to_string(max_rows=2, length=True)
exp = '0 0\n ..\n99 99\nLength: 100'
self.assertEqual(res, exp)
def test_to_string_na_rep(self):
s = pd.Series(index=range(100))
res = s.to_string(na_rep='foo', max_rows=2)
exp = '0 foo\n ..\n99 foo'
self.assertEqual(res, exp)
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype='float64')
res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
max_rows=2)
exp = '0 0.0\n ..\n9 9.0'
self.assertEqual(res, exp)
def test_to_string_header(self):
s = pd.Series(range(10),dtype='int64')
s.index.name = 'foo'
res = s.to_string(header=True, max_rows=2)
exp = 'foo\n0 0\n ..\n9 9'
self.assertEqual(res, exp)
res = s.to_string(header=False, max_rows=2)
exp = '0 0\n ..\n9 9'
self.assertEqual(res, exp)
class TestEngFormatter(tm.TestCase):
_multiprocess_can_split_ = True
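# fmt.set_eng_float_format() switches the global float formatter to
# engineering notation (exponents are multiples of 3); use_eng_prefix=True
# replaces the exponent with an SI prefix (k, M, ...) and 'accuracy'
# controls the number of decimals shown.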
def test_eng_float_formatter(self):
df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
fmt.set_eng_float_format()
result = df.to_string()
expected = (' A\n'
'0 1.410E+00\n'
'1 141.000E+00\n'
'2 14.100E+03\n'
'3 1.410E+06')
self.assertEqual(result, expected)
fmt.set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = (' A\n'
'0 1.410\n'
'1 141.000\n'
'2 14.100k\n'
'3 1.410M')
self.assertEqual(result, expected)
fmt.set_eng_float_format(accuracy=0)
result = df.to_string()
expected = (' A\n'
'0 1E+00\n'
'1 141E+00\n'
'2 14E+03\n'
'3 1E+06')
self.assertEqual(result, expected)
self.reset_display_options()
def compare(self, formatter, input, output):
formatted_input = formatter(input)
msg = ("formatting of %s results in '%s', expected '%s'"
% (str(input), formatted_input, output))
self.assertEqual(formatted_input, output, msg)
def compare_all(self, formatter, in_out):
"""
Parameters
----------
formatter : EngFormatter under test
in_out : list of tuples
Each tuple is (number, expected_formatting); it is checked that
'formatter(number) == expected_formatting'. *number* should be >= 0
because formatter(-number) is tested as well, with the expected output
derived from *expected_formatting* by prepending a minus sign.
"""
for input, output in in_out:
self.compare(formatter, input, output)
self.compare(formatter, -input, "-" + output[1:])
def test_exponents_with_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [(f * 10 ** -24, " 1.414y"),
(f * 10 ** -23, " 14.142y"),
(f * 10 ** -22, " 141.421y"),
(f * 10 ** -21, " 1.414z"),
(f * 10 ** -20, " 14.142z"),
(f * 10 ** -19, " 141.421z"),
(f * 10 ** -18, " 1.414a"),
(f * 10 ** -17, " 14.142a"),
(f * 10 ** -16, " 141.421a"),
(f * 10 ** -15, " 1.414f"),
(f * 10 ** -14, " 14.142f"),
(f * 10 ** -13, " 141.421f"),
(f * 10 ** -12, " 1.414p"),
(f * 10 ** -11, " 14.142p"),
(f * 10 ** -10, " 141.421p"),
(f * 10 ** -9, " 1.414n"),
(f * 10 ** -8, " 14.142n"),
(f * 10 ** -7, " 141.421n"),
(f * 10 ** -6, " 1.414u"),
(f * 10 ** -5, " 14.142u"),
(f * 10 ** -4, " 141.421u"),
(f * 10 ** -3, " 1.414m"),
(f * 10 ** -2, " 14.142m"),
(f * 10 ** -1, " 141.421m"),
(f * 10 ** 0, " 1.414"),
(f * 10 ** 1, " 14.142"),
(f * 10 ** 2, " 141.421"),
(f * 10 ** 3, " 1.414k"),
(f * 10 ** 4, " 14.142k"),
(f * 10 ** 5, " 141.421k"),
(f * 10 ** 6, " 1.414M"),
(f * 10 ** 7, " 14.142M"),
(f * 10 ** 8, " 141.421M"),
(f * 10 ** 9, " 1.414G"),
(f * 10 ** 10, " 14.142G"),
(f * 10 ** 11, " 141.421G"),
(f * 10 ** 12, " 1.414T"),
(f * 10 ** 13, " 14.142T"),
(f * 10 ** 14, " 141.421T"),
(f * 10 ** 15, " 1.414P"),
(f * 10 ** 16, " 14.142P"),
(f * 10 ** 17, " 141.421P"),
(f * 10 ** 18, " 1.414E"),
(f * 10 ** 19, " 14.142E"),
(f * 10 ** 20, " 141.421E"),
(f * 10 ** 21, " 1.414Z"),
(f * 10 ** 22, " 14.142Z"),
(f * 10 ** 23, " 141.421Z"),
(f * 10 ** 24, " 1.414Y"),
(f * 10 ** 25, " 14.142Y"),
(f * 10 ** 26, " 141.421Y")]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [(f * 10 ** -24, " 3.1416E-24"),
(f * 10 ** -23, " 31.4159E-24"),
(f * 10 ** -22, " 314.1593E-24"),
(f * 10 ** -21, " 3.1416E-21"),
(f * 10 ** -20, " 31.4159E-21"),
(f * 10 ** -19, " 314.1593E-21"),
(f * 10 ** -18, " 3.1416E-18"),
(f * 10 ** -17, " 31.4159E-18"),
(f * 10 ** -16, " 314.1593E-18"),
(f * 10 ** -15, " 3.1416E-15"),
(f * 10 ** -14, " 31.4159E-15"),
(f * 10 ** -13, " 314.1593E-15"),
(f * 10 ** -12, " 3.1416E-12"),
(f * 10 ** -11, " 31.4159E-12"),
(f * 10 ** -10, " 314.1593E-12"),
(f * 10 ** -9, " 3.1416E-09"),
(f * 10 ** -8, " 31.4159E-09"),
(f * 10 ** -7, " 314.1593E-09"),
(f * 10 ** -6, " 3.1416E-06"),
(f * 10 ** -5, " 31.4159E-06"),
(f * 10 ** -4, " 314.1593E-06"),
(f * 10 ** -3, " 3.1416E-03"),
(f * 10 ** -2, " 31.4159E-03"),
(f * 10 ** -1, " 314.1593E-03"),
(f * 10 ** 0, " 3.1416E+00"),
(f * 10 ** 1, " 31.4159E+00"),
(f * 10 ** 2, " 314.1593E+00"),
(f * 10 ** 3, " 3.1416E+03"),
(f * 10 ** 4, " 31.4159E+03"),
(f * 10 ** 5, " 314.1593E+03"),
(f * 10 ** 6, " 3.1416E+06"),
(f * 10 ** 7, " 31.4159E+06"),
(f * 10 ** 8, " 314.1593E+06"),
(f * 10 ** 9, " 3.1416E+09"),
(f * 10 ** 10, " 31.4159E+09"),
(f * 10 ** 11, " 314.1593E+09"),
(f * 10 ** 12, " 3.1416E+12"),
(f * 10 ** 13, " 31.4159E+12"),
(f * 10 ** 14, " 314.1593E+12"),
(f * 10 ** 15, " 3.1416E+15"),
(f * 10 ** 16, " 31.4159E+15"),
(f * 10 ** 17, " 314.1593E+15"),
(f * 10 ** 18, " 3.1416E+18"),
(f * 10 ** 19, " 31.4159E+18"),
(f * 10 ** 20, " 314.1593E+18"),
(f * 10 ** 21, " 3.1416E+21"),
(f * 10 ** 22, " 31.4159E+21"),
(f * 10 ** 23, " 314.1593E+21"),
(f * 10 ** 24, " 3.1416E+24"),
(f * 10 ** 25, " 31.4159E+24"),
(f * 10 ** 26, " 314.1593E+24")]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [(5.55555, ' 5.556'),
(55.5555, ' 55.556'),
(555.555, ' 555.555'),
(5555.55, ' 5.556k'),
(55555.5, ' 55.556k'),
(555555, ' 555.555k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [(5.55555, ' 5.6'),
(55.5555, ' 55.6'),
(555.555, ' 555.6'),
(5555.55, ' 5.6k'),
(55555.5, ' 55.6k'),
(555555, ' 555.6k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [(5.55555, ' 6'),
(55.5555, ' 56'),
(555.555, ' 556'),
(5555.55, ' 6k'),
(55555.5, ' 56k'),
(555555, ' 556k')]
self.compare_all(formatter, in_out)
formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
self.assertEqual(result, u(' 0.000'))
def _three_digit_exp():
return '%.4g' % 1.7e8 == '1.7e+008'
class TestFloatArrayFormatter(tm.TestCase):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
self.assertTrue(len(result) == 0)
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
self.assertEqual(result[0], " 12")
self.assertEqual(result[1], " 0")
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
d=pd.DataFrame({'col1':[9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7, 5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6, 4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
expected_output={
(0,6):' col1\n0 9.999000e-08\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
(1,6):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07',
(1,8):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07\n6 5.000100e-07\n7 6.000000e-07',
(8,16):' col1\n8 9.999000e-07\n9 1.000000e-06\n10 1.000100e-06\n11 2.000000e-06\n12 4.999000e-06\n13 5.000000e-06\n14 5.000100e-06\n15 6.000000e-06',
(9,16):' col1\n9 0.000001\n10 0.000001\n11 0.000002\n12 0.000005\n13 0.000005\n14 0.000005\n15 0.000006'
}
for (start, stop), v in expected_output.items():
self.assertEqual(str(d[start:stop]), v)
def test_too_long(self):
# GH 10451
with pd.option_context('display.precision', 4):
# need both a number > 1e8 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
self.assertEqual(str(df), ' x\n0 12345.6789')
df = pd.DataFrame(dict(x=[2e8]))
self.assertEqual(str(df), ' x\n0 200000000')
df = pd.DataFrame(dict(x=[12345.6789, 2e8]))
self.assertEqual(str(df), ' x\n0 1.2346e+04\n1 2.0000e+08')
class TestRepr_timedelta64(tm.TestCase):
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base()
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "0 days")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_even_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='even_day')
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "0 days")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='sub_day')
self.assertEqual(drepr(delta_1d), "1 days")
self.assertEqual(drepr(-delta_1d), "-1 days")
self.assertEqual(drepr(delta_0d), "00:00:00")
self.assertEqual(drepr(delta_1s), "00:00:01")
self.assertEqual(drepr(delta_500ms), "00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='long')
self.assertEqual(drepr(delta_1d), "1 days 00:00:00")
self.assertEqual(drepr(-delta_1d), "-1 days +00:00:00")
self.assertEqual(drepr(delta_0d), "0 days 00:00:00")
self.assertEqual(drepr(delta_1s), "0 days 00:00:01")
self.assertEqual(drepr(delta_500ms), "0 days 00:00:00.500000")
self.assertEqual(drepr(delta_1d + delta_1s), "1 days 00:00:01")
self.assertEqual(drepr(delta_1d + delta_500ms), "1 days 00:00:00.500000")
def test_all(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1ns = pd.to_timedelta(1, unit='ns')
drepr = lambda x: x._repr_base(format='all')
self.assertEqual(drepr(delta_1d), "1 days 00:00:00.000000000")
self.assertEqual(drepr(delta_0d), "0 days 00:00:00.000000000")
self.assertEqual(drepr(delta_1ns), "0 days 00:00:00.000000001")
class TestTimedelta64Formatter(tm.TestCase):
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
self.assertEqual(result[1].strip(), "'1 days'")
result = fmt.Timedelta64Formatter(x[1:2],box=True).get_result()
self.assertEqual(result[0].strip(), "'1 days'")
result = fmt.Timedelta64Formatter(x,box=False).get_result()
self.assertEqual(result[0].strip(), "0 days")
self.assertEqual(result[1].strip(), "1 days")
result = fmt.Timedelta64Formatter(x[1:2],box=False).get_result()
self.assertEqual(result[0].strip(), "1 days")
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
self.assertEqual(result[1].strip(), "'-1 days'")
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y,box=True).get_result()
self.assertEqual(result[0].strip(), "'00:00:00'")
self.assertEqual(result[1].strip(), "'00:00:01'")
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y,box=True).get_result()
self.assertEqual(result[0].strip(), "'00:00:00'")
self.assertEqual(result[1].strip(), "'-1 days +23:59:59'")
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x,box=True).get_result()
self.assertEqual(result[0].strip(), "'0 days'")
class TestDatetime64Formatter(tm.TestCase):
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 00:00:00")
self.assertEqual(result[1].strip(), "2013-01-01 12:00:00")
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01")
self.assertEqual(result[1].strip(), "2013-01-02")
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200")
def test_dates_display(self):
# 10170
# make sure that we are consistently display date formatting
x = Series(date_range('20130101 09:00:00',periods=5,freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-05 09:00:00")
x = Series(date_range('20130101 09:00:00',periods=5,freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:04")
x = Series(date_range('20130101 09:00:00',periods=5,freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.004")
x = Series(date_range('20130101 09:00:00',periods=5,freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000004")
x = Series(date_range('20130101 09:00:00',periods=5,freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000000")
self.assertEqual(result[1].strip(), "NaT")
self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000000004")
class TestNaTFormatting(tm.TestCase):
def test_repr(self):
self.assertEqual(repr(pd.NaT), "NaT")
def test_str(self):
self.assertEqual(str(pd.NaT), "NaT")
class TestDatetimeIndexFormat(tm.TestCase):
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
self.assertEqual(formatted[0], "2003-01-01 12:00:00")
self.assertEqual(formatted[1], "NaT")
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
self.assertEqual(formatted[0], "2003-01-01")
self.assertEqual(formatted[1], "NaT")
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013,1,1)], utc=True).format()
self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
formatted = pd.to_datetime([datetime(2013,1,1), pd.NaT], utc=True).format()
self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
    def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(date_format="%m-%d-%Y", na_rep="UT")
self.assertEqual(formatted[0], "02-01-2003")
self.assertEqual(formatted[1], "UT")
class TestDatetimeIndexUnicode(tm.TestCase):
def test_dates(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1)]))
self.assertTrue("['2013-01-01'," in text)
self.assertTrue(", '2014-01-01']" in text)
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1,12), datetime(2014,1,1)]))
self.assertTrue("'2013-01-01 00:00:00'," in text)
self.assertTrue("'2014-01-01 00:00:00']" in text)
class TestStringRepTimestamp(tm.TestCase):
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
ts_nanos_only = Timestamp(200)
self.assertEqual(str(ts_nanos_only), "1970-01-01 00:00:00.000000200")
ts_nanos_micros = Timestamp(1200)
self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200")
def test_tz_pytz(self):
tm._skip_if_no_pytz()
import pytz
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
def test_tz_dateutil(self):
tm._skip_if_no_dateutil()
import dateutil
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
apache-2.0
|
Ledoux/ShareYourSystem
|
Pythonlogy/build/lib/ShareYourSystem/Standards/Viewers/Pyploter/11_ExampleDoc.py
|
2
|
2392
|
#ImportModules
import ShareYourSystem as SYS
#figure
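# The mapSet dict below builds two panels (|A and |B); each panel holds charts
# (|a, |b) made of Draws, and every Draw wraps a matplotlib 'plot' call with its
# positional (#liarg) and keyword (#kwarg) arguments. PyplotingShiftVariable
# presumably offsets where a chart or panel is placed on the figure grid.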
MyPyploter=SYS.PyploterClass(
).mapSet(
{
'-Panels':
[
('|A',
{
'-Charts':
[
('|a',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[0,1,2]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}),
('|1',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[0,1,2],
[2,1,0]
],
'#kwarg':{
'linestyle':"--",
'color':'r'
}
}
)
],
})
]
}),
('|b',{
'PyplotingShiftVariable':[2,0],
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[1,1,1]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
})
]
}
),
('|B',
{
'PyplotingShiftVariable':[0,3],
'-Charts':
[
('|a',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[4,2,4]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
}),('|b',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[0,10,1]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
})
]
}
)
]
}
).pyplot(
)
#print
print('MyPyploter is ')
SYS._print(MyPyploter)
#show
SYS.matplotlib.pyplot.show()
"""
"""
"""
"""
|
mit
|
gweidner/incubator-systemml
|
src/main/python/tests/test_mllearn_numpy.py
|
12
|
8831
|
#!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
from sklearn import linear_model
sparkSession = SparkSession.builder.getOrCreate()
def writeColVector(X, fileName):
fileName = os.path.join(os.getcwd(), fileName)
X.tofile(fileName, sep='\n')
metaDataFileContent = '{ "data_type": "matrix", "value_type": "double", "rows":' + str(len(X)) + ', "cols": 1, "nnz": -1, "format": "csv", "author": "systemml-tests", "created": "0000-00-00 00:00:00 PST" }'
with open(fileName+'.mtd', 'w') as text_file:
text_file.write(metaDataFileContent)
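# Illustrative (assumed) usage: persist a NumPy column vector together with its
# SystemML metadata so DML scripts can read it back as a CSV matrix, e.g.
#   writeColVector(np.array([1.0, 2.0, 3.0]), 'y.csv')
# writes 'y.csv' plus a 'y.csv.mtd' descriptor for a 3x1 matrix.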
def deleteIfExists(fileName):
try:
os.remove(fileName)
except OSError:
pass
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
logistic = LogisticRegression(sparkSession)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_logistic_mlpipeline(self):
training = sparkSession.createDataFrame([
("a b c d e spark", 1.0),
("b d", 2.0),
("spark f g h", 1.0),
("hadoop mapreduce", 2.0),
("b spark who", 1.0),
("g d a y", 2.0),
("spark fly", 1.0),
("was mapreduce", 2.0),
("e spark program", 1.0),
("a e c l", 2.0),
("spark compile", 1.0),
("hadoop software", 2.0)
], ["text", "label"])
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20)
lr = LogisticRegression(sparkSession)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
model = pipeline.fit(training)
test = sparkSession.createDataFrame([
("spark i j k", 1.0),
("l m n", 2.0),
("mapreduce spark", 1.0),
("apache hadoop", 2.0)], ["text", "label"])
result = model.transform(test)
predictionAndLabels = result.select("prediction", "label")
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator()
score = evaluator.evaluate(predictionAndLabels)
self.failUnless(score == 1.0)
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, tol=0.0001)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
accuracy = accuracy_score(sklearn_predicted, mllearn_predicted)
evaluation = 'test_svm accuracy_score(sklearn_predicted, mllearn_predicted) was {}'.format(accuracy)
self.failUnless(accuracy > 0.95, evaluation)
def test_naive_bayes(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(X_train, y_train).predict(X_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
def test_naive_bayes1(self):
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
vectorizer = TfidfVectorizer()
# Both vectors and vectors_test are SciPy CSR matrix
vectors = vectorizer.fit_transform(newsgroups_train.data)
vectors_test = vectorizer.transform(newsgroups_test.data)
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(vectors, newsgroups_train.target).predict(vectors_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(vectors, newsgroups_train.target).predict(vectors_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
assad2012/ggplot
|
ggplot/tests/test_chart_components.py
|
12
|
1664
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from nose.tools import assert_raises, assert_equal, assert_is_none
from ggplot import *
from ggplot.utils.exceptions import GgplotError
def test_chart_components():
"""
Test invalid arguments to chart components
"""
df = pd.DataFrame({'x': np.arange(10),
'y': np.arange(10)})
gg = ggplot(df, aes(x='x', y='y'))
# test ggtitle
assert_raises(GgplotError, ggtitle, None)
# test xlim
assert_raises(GgplotError, xlim, "foo", 1)
assert_raises(GgplotError, xlim, "foo", "bar")
# test ylim
assert_raises(GgplotError, ylim, "foo", 1)
assert_raises(GgplotError, ylim, "foo", "bar")
# test xlab
    assert_raises(GgplotError, xlab, None)
# test ylab
assert_raises(GgplotError, ylab, None)
# test labs
test_xlab = 'xlab'
gg_xlab = gg + labs(x=test_xlab)
assert_equal(gg_xlab.xlab, test_xlab)
assert_is_none(gg_xlab.ylab)
assert_is_none(gg_xlab.title)
test_ylab = 'ylab'
gg_ylab = gg + labs(y=test_ylab)
assert_is_none(gg_ylab.xlab)
assert_equal(gg_ylab.ylab, test_ylab)
assert_is_none(gg_ylab.title)
test_title = 'title'
gg_title = gg + labs(title=test_title)
assert_is_none(gg_title.xlab)
assert_is_none(gg_title.ylab)
assert_equal(gg_title.title, test_title)
gg_labs = gg + labs(x=test_xlab, y=test_ylab, title=test_title)
assert_equal(gg_labs.xlab, test_xlab)
assert_equal(gg_labs.ylab, test_ylab)
assert_equal(gg_labs.title, test_title)
|
bsd-2-clause
|
ellisonbg/altair
|
altair/vegalite/v1/schema/channels.py
|
1
|
29550
|
# -*- coding: utf-8 -*-
#
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
import six
from . import core
import pandas as pd
from altair.utils.schemapi import Undefined
from altair.utils import parse_shorthand
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
if self.shorthand is Undefined:
kwds = {}
elif isinstance(self.shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(shorthand, **kwds).to_dict()
for shorthand in self.shorthand]
elif isinstance(self.shorthand, six.string_types):
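            # Illustrative note (assumed shorthand convention): parse_shorthand maps
            # e.g. 'price:Q' to {'field': 'price', 'type': 'quantitative'} before the
            # result is merged into this channel's keyword arguments.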
kwds = parse_shorthand(self.shorthand, data=context.get('data', None))
type_defined = self._kwds.get('type', Undefined) is not Undefined
if not (type_defined or 'type' in kwds):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(self.shorthand))
else:
raise ValueError("{0} encoding field is specified without a type; "
"the type cannot be automacially inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(self.shorthand))
else:
# shorthand is not a string; we pass the definition to field
if self.field is not Undefined:
raise ValueError("both shorthand and field specified in {0}"
"".format(self.__class__.__name__))
# field is a RepeatSpec or similar; cannot infer type
kwds = {'field': self.shorthand}
# set shorthand to Undefined, because it's not part of the schema
self.shorthand = Undefined
self._kwds.update({k: v for k, v in kwds.items()
if self._kwds.get(k, Undefined) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy()
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class Row(FieldChannelMixin, core.PositionChannelDef):
"""Row schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
axis : Axis
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Row, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, timeUnit=timeUnit, title=title,
type=type, value=value, **kwds)
class Column(FieldChannelMixin, core.PositionChannelDef):
"""Column schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
axis : Axis
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Column, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, timeUnit=timeUnit,
title=title, type=type, value=value, **kwds)
class X(FieldChannelMixin, core.PositionChannelDef):
"""X schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
axis : Axis
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(X, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, timeUnit=timeUnit, title=title,
type=type, value=value, **kwds)
class Y(FieldChannelMixin, core.PositionChannelDef):
"""Y schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
axis : Axis
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Y, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, timeUnit=timeUnit, title=title,
type=type, value=value, **kwds)
class X2(FieldChannelMixin, core.FieldDef):
"""X2 schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined, **kwds):
super(X2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, value=value, **kwds)
class Y2(FieldChannelMixin, core.FieldDef):
"""Y2 schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined, **kwds):
super(Y2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, value=value, **kwds)
class Color(FieldChannelMixin, core.ChannelDefWithLegend):
"""Color schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
legend : Legend
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Color, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
legend=legend, scale=scale, sort=sort, timeUnit=timeUnit,
title=title, type=type, value=value, **kwds)
class Opacity(FieldChannelMixin, core.ChannelDefWithLegend):
"""Opacity schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
legend : Legend
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Opacity, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
legend=legend, scale=scale, sort=sort, timeUnit=timeUnit,
title=title, type=type, value=value, **kwds)
class Size(FieldChannelMixin, core.ChannelDefWithLegend):
"""Size schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
legend : Legend
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Size, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
legend=legend, scale=scale, sort=sort, timeUnit=timeUnit,
title=title, type=type, value=value, **kwds)
class Shape(FieldChannelMixin, core.ChannelDefWithLegend):
"""Shape schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
legend : Legend
scale : Scale
sort : anyOf(SortOrder, SortField)
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined,
type=Undefined, value=Undefined, **kwds):
super(Shape, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
legend=legend, scale=scale, sort=sort, timeUnit=timeUnit,
title=title, type=type, value=value, **kwds)
class Detail(FieldChannelMixin, core.FieldDef):
"""Detail schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined, **kwds):
super(Detail, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, value=value, **kwds)
class Text(FieldChannelMixin, core.FieldDef):
"""Text schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined, **kwds):
super(Text, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, value=value, **kwds)
class Label(FieldChannelMixin, core.FieldDef):
"""Label schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined, **kwds):
super(Label, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, value=value, **kwds)
class Path(FieldChannelMixin, core.OrderChannelDef):
"""Path schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
sort : SortOrder
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined,
**kwds):
super(Path, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
sort=sort, timeUnit=timeUnit, title=title, type=type, value=value,
**kwds)
class Order(FieldChannelMixin, core.OrderChannelDef):
"""Order schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : AggregateOp
Aggregation function for the field (e.g., ``mean``, ``sum``, ``median``, ``min``,
``max``, ``count`` ).
bin : anyOf(Bin, boolean)
Flag for binning a ``quantitative`` field, or a bin property object for binning
parameters.
field : string
Name of the field from which to pull a data value.
sort : SortOrder
timeUnit : TimeUnit
Time unit for a ``temporal`` field (e.g., ``year``, ``yearmonth``, ``month``,
``hour`` ).
title : string
Title for axis or legend.
type : Type
The encoded field's type of measurement. This can be either a full type name (
``"quantitative"``, ``"temporal"``, ``"ordinal"``, and ``"nominal"`` ) or an
initial character of the type name ( ``"Q"``, ``"T"``, ``"O"``, ``"N"`` ). This
property is case insensitive.
value : anyOf(string, float, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined, value=Undefined,
**kwds):
super(Order, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
sort=sort, timeUnit=timeUnit, title=title, type=type, value=value,
**kwds)
|
bsd-3-clause
|
shiquanwang/pylearn2
|
pylearn2/sandbox/cuda_convnet/specialized_bench.py
|
5
|
3863
|
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
    filters2 = shared(base_filters2_value, name='filters2')
    layer_2_detector = FilterActs()(layer_1_pooled_fake, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
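    # conv2d performs true convolution (it flips the kernels), whereas FilterActs
    # computes cross-correlation, so flip the kernels here to keep both paths comparable.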
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
|
bsd-3-clause
|
mrcouts/Bootstrap-Paradox
|
Experimental/Aquisicoes/CTCt_circulo/aquisicoes.py
|
1
|
8603
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from math import pi as Pi
from txt2py import *
from matplotlib.ticker import MultipleLocator
A = txt2py("aquisicao_circulo_CTCt_lambda60_3.txt")
#aquisicao_circulo_PIDSMCx_lambda70_phi6_k70_0_3: e_quad = 0.912252649215 | tau_quad = 0.238929403956 | s1_quad = 4.83123738371 | s2_quad = 4.92973723999 | ex_quad = 0.693586588682 | ey_quad = 0.592572814091 | tau1_quad = 0.163618207999 | tau2_quad = 0.174115886943
#aquisicao_SMCt_lambda60_phi20_k363_3: e_quad = 1.0865600936 | tau_quad = 0.237820339378 | s1_quad = 55.7256386861 | s2_quad = 52.1114451708 | ex_quad = 0.89867749944 | ey_quad = 0.610730373405 | tau1_quad = 0.162677093788 | tau2_quad = 0.173478174358
#aquisicao_circulo_SMCx_lambda60_phi6_k103_0_3: e_quad = 1.27647821995 | tau_quad = 0.260967898131 | s1_quad = 8.35898181713 | s2_quad = 4.9935905567 | ex_quad = 0.971027682922 | ey_quad = 0.828554092984 | tau1_quad = 0.169215497632 | tau2_quad = 0.198671485664
#aquisicao_circulo_PIDx_lambda70_4: e_quad = 1.28260640104 | tau_quad = 0.219666166256 | s1_quad = 6.09596142926 | s2_quad = 4.45005347346 | ex_quad = 1.14463260263 | ey_quad = 0.578701464488 | tau1_quad = 0.149533211028 | tau2_quad = 0.16091315483
#aquisicao_circulo_CTCt_lambda60_3: e_quad = 1.61909535266 | tau_quad = 0.202338133235 | s1_quad = 45.0666924646 | s2_quad = 53.5093564972 | ex_quad = 1.31948403439 | ey_quad = 0.938313190784 | tau1_quad = 0.132682753043 | tau2_quad = 0.152761275217
#aquisicao_circulo_CTCx_lambda60_1: e_quad = 1.63913254193 | tau_quad = 0.212001438004 | s1_quad = 5.99433632576 | s2_quad = 4.31327608202 | ex_quad = 1.358628447 | ey_quad = 0.91699740076 | tau1_quad = 0.140617598848 | tau2_quad = 0.158654658331
#aquisicao_circulo_PIDSMCt_lambda35_phi20_k200_6: e_quad = 3.34857383374 | tau_quad = 0.198843595669 | s1_quad = 38.0594077422 | s2_quad = 36.1432633763 | ex_quad = 1.6688967167 | ey_quad = 2.90305536788 | tau1_quad = 0.135978269066 | tau2_quad = 0.14508165246
#aquisicao_circulo_PIDt_lambda35_11: e_quad = 4.45725019704 | tau_quad = 0.186064672078 | s1_quad = 34.1700477306 | s2_quad = 32.2174188766 | ex_quad = 2.3747188143 | ey_quad = 3.77197426714 | tau1_quad = 0.120771293755 | tau2_quad = 0.141542773748
#aquisicao_triangulo_SMCx_lambda60_phi6_k103_0_2: e_quad = 1.930624, i_quad = 4.677893, ex_quad = 1.499759, s1_quad = 8.5627540, s2_quad = 15.5908030, ey_quad = 1.215743, i1_quad = 3.047811, i2_quad = 3.548737
#aquisicao_triangulo_SMCt_lambda60_phi20_k363_2: e_quad = 2.315927, i_quad = 4.790667, ex_quad = 2.073283, s1_quad = 90.622643, s2_quad = 96.8548660, ey_quad = 1.031995, i1_quad = 3.142629, i2_quad = 3.615850
#aquisicao_triangulo_PIDSMCx_lambda60_phi6_k103_0_3: e_quad = 2.534649, i_quad = 4.721774, ex_quad = 0.985656, s1_quad = 9.2306470, s2_quad = 23.4585950, ey_quad = 2.335151, i1_quad = 2.900244, i2_quad = 3.726088
#aquisicao_triangulo_CTCx_lambda60_4: e_quad = 2.593759, i_quad = 4.709301, ex_quad = 2.198307, s1_quad = 10.335380, s2_quad = 15.6801630, ey_quad = 1.376601, i1_quad = 3.014247, i2_quad = 3.618262
#aquisicao_triangulo_CTCt_lambda60_5: e_quad = 2.607846, i_quad = 4.541315, ex_quad = 2.244732, s1_quad = 85.878242, s2_quad = 106.841187, ey_quad = 1.327417, i1_quad = 2.874919, i2_quad = 3.515448
#aquisicao_triangulo_PIDx_lambda70_6: e_quad = 2.803854, i_quad = 4.803226, ex_quad = 1.434845, s1_quad = 10.004310, s2_quad = 23.8964730, ey_quad = 2.408903, i1_quad = 2.968096, i2_quad = 3.776425
#aquisicao_triangulo_PIDSMCt_lambda25_phi90_k200_2: e_quad = 7.304744, i_quad = 5.331062, ex_quad = 4.019952, s1_quad = 81.700752, s2_quad = 70.6879430, ey_quad = 6.099121, i1_quad = 3.646859, i2_quad = 3.888527
#aquisicao_triangulo_PIDt_lambda25_2: e_quad = 7.615739, i_quad = 4.938209, ex_quad = 3.926870, s1_quad = 70.306961, s2_quad = 66.9731520, ey_quad = 6.525272, i1_quad = 3.150745, i2_quad = 3.802461
end = 3000
t_np = np.array([A[i][0] for i in range(end)])
x_np = np.array([A[i][1] for i in range(end)])
y_np = np.array([A[i][2] for i in range(end)])
xref_np= np.array([A[i][3] for i in range(end)])
yref_np= np.array([A[i][4] for i in range(end)])
ex_np = np.array([A[i][5] for i in range(end)])
ey_np = np.array([A[i][6] for i in range(end)])
i1_np = np.array([A[i][7] for i in range(end)])
i2_np = np.array([A[i][8] for i in range(end)])
u1_np = np.array([A[i][9] for i in range(end)])
u2_np = np.array([A[i][10] for i in range(end)])
s1_np = np.array([A[i][11] for i in range(end)])
s2_np = np.array([A[i][12] for i in range(end)])
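# Unit conversions: positions, references and errors from m to mm; time from ms to s.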
x_np = 1000.0*x_np
y_np = 1000.0*y_np
xref_np = 1000.0*xref_np
yref_np = 1000.0*yref_np
ex_np = 1000.0*ex_np
ey_np = 1000.0*ey_np
t_np = 0.001*t_np
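# Convert measured motor currents [A] to torques [Nm] using the (assumed) torque
# constants of actuators 1 and 2.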
tau1_np = 0.055984*i1_np
tau2_np = 0.0566596*i2_np
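# Steady-state error estimate: average the last 33 samples of ex and ey, then
# combine them into the Euclidean norm e_f.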
ex_f = 0
ey_f = 0
for i in range(end-33,end):
ex_f += ex_np[i]
ey_f += ey_np[i]
ex_f = ex_f/33.0
ey_f = ey_f/33.0
e_f = (ex_f**2 + ey_f**2)**0.5
print "ex_f =", ex_f, "| ey_f =", ey_f, "| e_f =", e_f
"""T = 1000
Ta = 3
n2 = (2-1)*T/3
n8 = (8-1)*T/3
print n2
print n8
ex_quad = 0.0
ey_quad = 0.0
tau1_quad = 0.0
tau2_quad = 0.0
s1_quad = 0.0
s2_quad = 0.0
for i in range(n2,n8):
ex_quad += ex_np[i]**2
ey_quad += ey_np[i]**2
tau1_quad += tau1_np[i]**2
tau2_quad += tau2_np[i]**2
s1_quad += s1_np[i]**2
s2_quad += s2_np[i]**2
ex_quad = 1000*(2*ex_quad/(n8-n2))**0.5
ey_quad = 1000*(2*ey_quad/(n8-n2))**0.5
e_quad = (ex_quad**2 + ey_quad**2)**0.5
tau1_quad = (2*tau1_quad/(n8-n2))**0.5
tau2_quad = (2*tau2_quad/(n8-n2))**0.5
tau_quad = (tau1_quad**2 + tau2_quad**2)**0.5
s1_quad = (2*s1_quad/(n8-n2))**0.5
s2_quad = (2*s2_quad/(n8-n2))**0.5
print "e_quad =", e_quad, "| tau_quad =", tau_quad, "| s1_quad =", s1_quad, "| s2_quad =", s2_quad, "| ex_quad =", ex_quad, "| ey_quad =", ey_quad, "| tau1_quad =", tau1_quad, "| tau2_quad =", tau2_quad
"""
ml_minor = MultipleLocator(1)
ml_major = MultipleLocator(4)
fig, ax = plt.subplots()
ax.plot(xref_np, yref_np, 'b', linewidth=1, label= 'Refer' + u'ê' 'ncia')
ax.plot(x_np, y_np, 'r', linewidth=1, label='Trajet' + u'ó' + 'ria real')
plt.xlabel(r'$x[mm]$', fontsize=18)
plt.ylabel(r'$y[mm]$', fontsize=18)
plt.axis('equal')
plt.title('Trajet' + u'ó' + 'ria realizada')
ax.legend(loc=4, ncol=1, prop={'size': 10})
plt.savefig('xy.png')
plt.figure()
plt.plot(t_np, ex_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$', fontsize=18)
plt.ylabel(r'$e_x[mm]$', fontsize=18)
plt.ylim(-9.5, 9.5)
plt.axes().yaxis.set_minor_locator(ml_minor)
plt.axes().yaxis.set_major_locator(ml_major)
plt.title('Erro de posi' + u'ç' + u'ã' + 'o em fun' + u'ç' + u'ã' 'o do tempo (coordenada x)')
plt.savefig('ex.png')
plt.figure()
plt.plot(t_np, ey_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$', fontsize=18)
plt.ylabel(r'$e_y[mm]$', fontsize=18)
plt.ylim(-9.5, 9.5)
plt.axes().yaxis.set_minor_locator(ml_minor)
plt.axes().yaxis.set_major_locator(ml_major)
plt.title('Erro de posi' + u'ç' + u'ã' + 'o em fun' + u'ç' + u'ã' 'o do tempo (coordenada y)')
plt.savefig('ey.png')
plt.figure()
plt.plot(t_np, tau1_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$', fontsize=18)
plt.ylabel(r'$\tau_1[Nm]$', fontsize=18)
plt.ylim(-0.7, 0.7)
plt.title('Torque aplicado ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 1)')
plt.savefig('tau1.png')
plt.figure()
plt.plot(t_np, tau2_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$', fontsize=18)
plt.ylabel(r'$\tau_2[Nm]$', fontsize=18)
plt.ylim(-0.7, 0.7)
plt.title('Torque aplicado ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)')
plt.savefig('tau2.png')
plt.figure()
plt.plot(t_np, u1_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$')
plt.ylabel(r'$u_1[V]$')
plt.title('Tens' + u'ã' + 'o aplicada ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 1)')
plt.savefig('u1.png')
plt.figure()
plt.plot(t_np, u2_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$')
plt.ylabel(r'$u_2[V]$')
plt.title('Tens' + u'ã' + 'o aplicada ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)')
plt.savefig('u2.png')
plt.figure()
plt.plot(t_np, s1_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$')
plt.ylabel(r'$s_1[rad/s^2]$')
plt.title('Vari' + u'á' + 'vel de escorregamento ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 1)')
plt.savefig('s1.png')
plt.figure()
plt.plot(t_np, s2_np, 'r', linewidth=1.01)
plt.xlabel(r'$t[s]$')
plt.ylabel(r'$s_2[rad/s^2]$')
plt.title('Vari' + u'á' + 'vel de escorregamento ' + 'em fun' + u'ç' + u'ã' 'o do tempo (atuador 2)')
plt.savefig('s2.png')
|
gpl-3.0
|
srio/Orange-XOPPY
|
orangecontrib/xoppy/util/srcalc/srcalc.py
|
1
|
29993
|
#*********************************************************************************************************
#*********************************************************************************************************
#*********************************************************************************************************
#
# auxiliar functions for SRCALC
#
#*********************************************************************************************************
#*********************************************************************************************************
import numpy
def load_srcalc_output_file(filename="D_IDPower.TXT",skiprows=5,four_quadrants=True,
do_plot=False,verbose=True):
"""
:param filename:
:param skiprows:
:param four_quadrants:
:param do_plot:
:param verbose:
:return:
out_dictionary: Zlist 0 (63, 43) <== source
out_dictionary: Zlist 1 (63, 43) <== oe1
out_dictionary: Zlist 2 (63, 43) <== oe2
out_dictionary: X (63,)
out_dictionary: Y (43,)
out_dictionary: RAWDATA (704, 3)
out_dictionary: from D_IDPower.TXT: NELEMENTS
"""
a = numpy.loadtxt(filename,skiprows=skiprows)
f = open(filename,'r')
line = f.readlines()
f.close()
npx = int(line[0])
xps = float(line[1])
npy = int(line[2])
yps = float(line[3])
nMirr = int(line[4])
if nMirr == 0:
a.shape = (a.size,1)
SOURCE = numpy.zeros((nMirr+1,npx,npy))
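    # Re-pack the flat file columns into one (npx, npy) map per element:
    # column 0 is the source, then one column per optical element.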
ii = -1
for ix in range(npx):
for iy in range(npy):
ii += 1
for icol in range(nMirr+1):
SOURCE[icol,ix,iy] = a[ii,icol]
hh = numpy.linspace(0,0.5 * xps,npx)
vv = numpy.linspace(0,0.5 * yps,npy)
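    # Mirror the positive half-axes about zero to build the full four-quadrant grids.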
hhh = numpy.concatenate((-hh[::-1], hh[1:]))
vvv = numpy.concatenate((-vv[::-1], vv[1:]))
int_mesh1 = []
int_mesh2 = []
for i in range(nMirr+1):
int_mesh = SOURCE[i,:,:].copy()
# normalize every pixel to absolute power (W and not W/mm2)
int_mesh *= (hh[1] - hh[0]) * (vv[1] - vv[0])
int_mesh1.append(int_mesh)
tmp = numpy.concatenate((int_mesh[::-1, :], int_mesh[1:, :]), axis=0)
int_mesh2.append( numpy.concatenate((tmp[:, ::-1], tmp[:, 1:]), axis=1) )
if do_plot:
from srxraylib.plot.gol import plot_image
if four_quadrants:
# totPower = int_mesh2[i].sum() * (hhh[1] - hhh[0]) * (vvv[1] - vvv[0])
totPower2 = trapezoidal_rule_2d_1darrays(int_mesh2[i],hhh,vvv)
plot_image(int_mesh2[i],hhh,vvv,title=">>%d<< Source Tot Power %f, pow density: %f"%(i,totPower2,int_mesh2[i].max()),show=True)
else:
# totPower = int_mesh1[i].sum() * (hh[1] - hh[0]) * (vv[1] - vv[0])
totPower2 = trapezoidal_rule_2d_1darrays(int_mesh1[i],hh,vv)
plot_image(int_mesh1[i], hh, vv,
title=">>%d<< Source Tot Power %f, pow density: %f" % (i, totPower2, int_mesh2[i].max()),
show=True)
if four_quadrants:
out_dictionary = {"Zlist": int_mesh2, "X": hhh, "Y": vvv, "RAWDATA": a, "NELEMENTS": nMirr}
else:
out_dictionary = {"Zlist": int_mesh1, "X": hh, "Y": vv, "RAWDATA": a, "NELEMENTS": nMirr}
if verbose:
for key in out_dictionary.keys():
if isinstance(out_dictionary[key],numpy.ndarray):
print(">>1>> out_dictionary: from D_IDPower.TXT: ", key, out_dictionary[key].shape)
elif isinstance(out_dictionary[key],list):
for i in range(len(out_dictionary[key])):
print(">>1>> out_dictionary: from D_IDPower.TXT: ", key, i, out_dictionary[key][i].shape)
else:
print(">>1>> out_dictionary: from D_IDPower.TXT: ", key)
return out_dictionary
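# Minimal usage sketch (assumption: a SRCALC "D_IDPower.TXT" file is available in
# the working directory):
#   d = load_srcalc_output_file("D_IDPower.TXT", four_quadrants=True, do_plot=False)
#   power_map_source = d["Zlist"][0]   # absolute power per pixel [W] for the source
#   h, v = d["X"], d["Y"]              # horizontal / vertical grid coordinates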
def ray_tracing(
out_dictionary,
SOURCE_SCREEN_DISTANCE=13.73,
number_of_elements=1,
oe_parameters= {
"EL0_SHAPE":2,
"EL0_P_POSITION":13.73,
"EL0_Q_POSITION":0.0,
"EL0_P_FOCUS":0.0,
"EL0_Q_FOCUS":0.0,
"EL0_ANG":88.75,
"EL0_THICKNESS":1000,
"EL0_RELATIVE_TO_PREVIOUS":2,
},
real_space_shuffle=[0,0,0],
accumulate_results=True,
store_footprint=True,
store_image=True,
verbose=False,
run_index=None,
undo_shadow_orientation_angle_rotation=False):
"""
:param out_dictionary:
:param SOURCE_SCREEN_DISTANCE:
:param number_of_elements:
:param oe_parameters:
:param real_space_shuffle:
:param accumulate_results:
:param verbose:
:return:
adds in the out_dictionary the keys:
OE_FOOTPRINT: list[oe_index] ndarray(3, 2709) (shadow: col2,col1,col23)
OE_IMAGE: list[oe_index] ndarray(3, 2709) (shadow: col2,col1,col23)
"""
from orangecontrib.xoppy.util.srcalc.beam import Beam
from orangecontrib.xoppy.util.srcalc.conic import Conic
from orangecontrib.xoppy.util.srcalc.toroid import Toroid
# from shadow4.beam.beam import Beam
# from shadow4.optical_surfaces.conic import Conic
# from shadow4.optical_surfaces.toroid import Toroid
#
# compute shadow beam from urgent results
#
vx = out_dictionary["X"] / ( 1e3 * SOURCE_SCREEN_DISTANCE )
vz = out_dictionary["Y"] / ( 1e3 * SOURCE_SCREEN_DISTANCE )
nrays = vx.size * vz.size
VX = numpy.outer(vx, numpy.ones_like(vz)).flatten()
VZ = numpy.outer(numpy.ones_like(vx), vz).flatten()
    VY = numpy.sqrt(1.0 - VX ** 2 - VZ ** 2).flatten()  # unit direction vector: VX**2 + VY**2 + VZ**2 = 1
X = numpy.ones(nrays) * real_space_shuffle[0]
Y = numpy.ones(nrays) * real_space_shuffle[1]
Z = numpy.ones(nrays) * real_space_shuffle[2]
# print(VY)
beam = Beam.initialize_as_pencil(N=nrays)
beam.set_column(1, X)
beam.set_column(2, Y)
beam.set_column(3, Z)
beam.set_column(4, VX)
beam.set_column(5, VY)
beam.set_column(6, VZ)
# if dump_shadow_files:
# from shadow4.compatibility.beam3 import Beam3
# beam.set_column(7, (numpy.sqrt(out_dictionary["Zlist"][0]).flatten()))
# beam.set_column(8, 0.0)
# beam.set_column(9, 0.0)
# beam.set_column(16, 0.0)
# beam.set_column(17, 0.0)
# beam.set_column(18, 0.0)
# if run_index is None:
# filename = 'begin_srcalc.dat'
# else:
# filename = 'begin_srcalc_%03d.dat' % run_index
# Beam3.initialize_from_shadow4_beam(beam).write(filename)
# print("File written to disk: %s" % filename)
OE_FOOTPRINT = []
OE_IMAGE = []
for oe_index in range(number_of_elements):
p = oe_parameters["EL%d_P_POSITION" % oe_index]
q = oe_parameters["EL%d_Q_POSITION" % oe_index]
theta_grazing = (90.0 - oe_parameters["EL%d_ANG" % oe_index]) * numpy.pi / 180
if oe_parameters["EL%d_RELATIVE_TO_PREVIOUS"%oe_index] == 0:
alpha = 90.0 * numpy.pi / 180
elif oe_parameters["EL%d_RELATIVE_TO_PREVIOUS"%oe_index] == 1:
alpha = 270.0 * numpy.pi / 180
elif oe_parameters["EL%d_RELATIVE_TO_PREVIOUS"%oe_index] == 2:
alpha = 0.0
elif oe_parameters["EL%d_RELATIVE_TO_PREVIOUS"%oe_index] == 3:
alpha = 180.0 * numpy.pi / 180
if oe_parameters["EL%d_SHAPE"%oe_index] == 0: # "Toroidal mirror",
is_conic = False
toroid = Toroid()
toroid.set_from_focal_distances(
oe_parameters["EL%d_P_FOCUS" % oe_index],
oe_parameters["EL%d_Q_FOCUS" % oe_index],
theta_grazing)
elif oe_parameters["EL%d_SHAPE"%oe_index] == 1: # "Spherical mirror",
is_conic = True
ccc = Conic.initialize_as_sphere_from_focal_distances(
oe_parameters["EL%d_P_FOCUS" % oe_index],
oe_parameters["EL%d_Q_FOCUS" % oe_index],
theta_grazing,cylindrical=0,cylangle=0,switch_convexity=0)
elif oe_parameters["EL%d_SHAPE"%oe_index] == 2: # "Plane mirror",
is_conic = True
ccc = Conic.initialize_as_plane()
elif oe_parameters["EL%d_SHAPE"%oe_index] == 3: # "MerCyl mirror",
is_conic = True
ccc = Conic.initialize_as_sphere_from_focal_distances(
oe_parameters["EL%d_P_FOCUS" % oe_index],
oe_parameters["EL%d_Q_FOCUS" % oe_index],
theta_grazing,cylindrical=1,cylangle=0,switch_convexity=0)
elif oe_parameters["EL%d_SHAPE"%oe_index] == 4: # "SagCyl mirror",
raise Exception("Not implemented")
elif oe_parameters["EL%d_SHAPE"%oe_index] == 5: # "Ellipsoidal mirror",
is_conic = True
ccc = Conic.initialize_as_ellipsoid_from_focal_distances(
oe_parameters["EL%d_P_FOCUS" % oe_index],
oe_parameters["EL%d_Q_FOCUS" % oe_index],
theta_grazing,cylindrical=0,cylangle=0,switch_convexity=0)
elif oe_parameters["EL%d_SHAPE"%oe_index] == 6: # "MerEll mirror",
is_conic = True
ccc = Conic.initialize_as_ellipsoid_from_focal_distances(
oe_parameters["EL%d_P_FOCUS" % oe_index],
oe_parameters["EL%d_Q_FOCUS" % oe_index],
theta_grazing,cylindrical=1,cylangle=0,switch_convexity=0)
elif oe_parameters["EL%d_SHAPE"%oe_index] == 7: # "SagEllip mirror",
raise Exception("Not implemented")
elif oe_parameters["EL%d_SHAPE"%oe_index] == 8: # "Filter",
is_conic = True
ccc = Conic.initialize_as_plane()
elif oe_parameters["EL%d_SHAPE"%oe_index] == 9: # "Crystal"
is_conic = True
ccc = Conic.initialize_as_plane()
if oe_index == 0:
newbeam = beam.duplicate()
#
# put beam in mirror reference system
#
newbeam.rotate(alpha, axis=2)
newbeam.rotate(theta_grazing, axis=1)
newbeam.translation([0.0, -p * numpy.cos(theta_grazing), p * numpy.sin(theta_grazing)])
#
# reflect beam in the mirror surface and dump mirr.01
#
if is_conic:
if verbose:
print("\n\nElement %d is CONIC :\n" % (1 + oe_index), ccc.info())
newbeam = ccc.apply_specular_reflection_on_beam(newbeam)
else:
if verbose:
print("\n\nElement %d is TOROIDAL :\n" % (1 + oe_index), toroid.info())
newbeam = toroid.apply_specular_reflection_on_beam(newbeam)
print("\n p: %f m" % p)
print(" q: %f m" % q)
print(" p (focal): %f m" % oe_parameters["EL%d_P_FOCUS" % oe_index] )
print(" q (focal): %f m" % oe_parameters["EL%d_Q_FOCUS" % oe_index] )
print(" alpha: %f rad = %f deg" % (alpha, alpha*180/numpy.pi) )
print(" theta_grazing: %f rad = %f deg" % (theta_grazing, theta_grazing*180/numpy.pi) )
print(" theta_normal: %f rad = %f deg \n" % (numpy.pi/2 - theta_grazing, 90 - theta_grazing * 180 / numpy.pi))
# if dump_shadow_files:
# from shadow4.compatibility.beam3 import Beam3
# newbeam.set_column(7, (numpy.sqrt(out_dictionary["Zlist"][oe_index] - out_dictionary["Zlist"][oe_index + 1]).flatten()))
# newbeam.set_column(8, 0.0)
# newbeam.set_column(9, 0.0)
# newbeam.set_column(16, 0.0)
# newbeam.set_column(17, 0.0)
# newbeam.set_column(18, 0.0)
# if run_index is None:
# filename = 'mirr_srcalc.%02d' % (oe_index+1)
# else:
# filename = 'mirr_srcalc_%03d.%02d' % (run_index, oe_index+1)
# Beam3.initialize_from_shadow4_beam(newbeam).write(filename)
# print("File written to disk: %s" % filename)
tmp = newbeam.get_columns((2, 1, 23))
tmp[2,:] = (out_dictionary["Zlist"][oe_index+1]).flatten()
OE_FOOTPRINT.append( tmp )
#
# put beam in lab frame and compute image
#
newbeam.rotate(theta_grazing, axis=1)
# do not undo alpha rotation: newbeam.rotate(-alpha, axis=2)
if undo_shadow_orientation_angle_rotation:
newbeam.rotate(-alpha, axis=2)
newbeam.retrace(q, resetY=True)
# if dump_shadow_files:
# from shadow4.compatibility.beam3 import Beam3
# newbeam.set_column(7, (numpy.sqrt(out_dictionary["Zlist"][oe_index + 1]).flatten()))
# newbeam.set_column(8, 0.0)
# newbeam.set_column(9, 0.0)
# newbeam.set_column(16, 0.0)
# newbeam.set_column(17, 0.0)
# newbeam.set_column(18, 0.0)
# if run_index is None:
# filename = 'star_srcalc.%02d' % (oe_index+1)
# else:
# filename = 'star_srcalc_%03d.%02d' % (run_index, oe_index+1)
# Beam3.initialize_from_shadow4_beam(newbeam).write(filename)
# print("File written to disk: %s" % filename)
tmp = newbeam.get_columns((1, 3, 23))
tmp[2,:] = (out_dictionary["Zlist"][oe_index+1]).flatten()
OE_IMAGE.append(tmp)
# add ray tracing results to in/out dictionary
# footprint
if store_footprint:
out_dictionary["OE_FOOTPRINT"] = OE_FOOTPRINT
if store_image:
if accumulate_results:
if "OE_IMAGE" in out_dictionary.keys():
initialize_results = False
else:
initialize_results = True
else:
initialize_results = True
if initialize_results:
out_dictionary["OE_IMAGE"] = OE_IMAGE
else:
for oe_index in range(number_of_elements):
out_dictionary["OE_IMAGE"][oe_index] = numpy.concatenate( \
(out_dictionary["OE_IMAGE"][oe_index], OE_IMAGE[oe_index]), axis=1)
if True:
if "OE_FOOTPRINT" in out_dictionary.keys():
for i in range(len(out_dictionary["OE_FOOTPRINT"])):
print(">>2>> out_dictionary: from raytracing OE_FOOTPRINT: ", i, out_dictionary["OE_FOOTPRINT"][i].shape)
if "OE_IMAGE" in out_dictionary.keys():
for i in range(len(out_dictionary["OE_IMAGE"])):
print(">>2>> out_dictionary: from raytracing OE_IMAGE: ", i, out_dictionary["OE_IMAGE"][i].shape)
print("\n")
return out_dictionary
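# --- Hedged usage sketch (editor's addition, not in the original module) ---
# OE_FOOTPRINT[k] packs three rows: the coordinate along the element (shadow col 2),
# the transverse coordinate (shadow col 1) and, in the slot of shadow col 23, the
# power density transmitted past element k (Zlist[k+1]).  The reshape below mirrors
# what compute_power_density_footprint() does; it assumes ray_tracing() was run
# with store_footprint=True.
def _example_footprint_as_map(out_dictionary, oe_index=0):
    shapeXY = (out_dictionary["X"].size, out_dictionary["Y"].size)
    footprint = out_dictionary["OE_FOOTPRINT"][oe_index]
    YY = footprint[0, :].reshape(shapeXY)        # along the optical element
    XX = footprint[1, :].reshape(shapeXY)        # across the optical element
    transmitted = footprint[2, :].reshape(shapeXY)
    return XX, YY, transmitted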
# def calculate_pixel_areas(X,Y,suppress_last_row_and_column=True):
# u1 = numpy.roll(X, -1, axis=0) - X
# u2 = numpy.roll(Y, -1, axis=0) - Y
# v1 = numpy.roll(X, -1, axis=1) - X
# v2 = numpy.roll(Y, -1, axis=1) - Y
#
# if suppress_last_row_and_column:
# u1 = u1[0:-1, 0:-1].copy()
# u2 = u2[0:-1, 0:-1].copy()
# v1 = v1[0:-1, 0:-1].copy()
# v2 = v2[0:-1, 0:-1].copy()
# XX = X[0:-1, 0:-1].copy()
# YY = Y[0:-1, 0:-1].copy()
# else:
# XX = X.copy()
# YY = Y.copy()
# return u1 * v2 - u2 * v1, XX, YY
def interpolate_to_regular_grid(power_density_footprint, XX_FOOTPRINT, YY_FOOTPRINT,
nx=None, ny=None,
xrange=None, yrange=None,
renormalize_integrals=True,
interpolation_method=0):
# debug_plot_3d(power_density_footprint.flatten(),
# XX_FOOTPRINT.flatten(), YY_FOOTPRINT.flatten(),
# title="before interpolation, oe: %d"%(element_index+1))
if nx is None:
nx = XX_FOOTPRINT.shape[0]
if ny is None:
ny = YY_FOOTPRINT.shape[1]
if interpolation_method == 0:
method = 'linear'
elif interpolation_method == 1:
method = 'nearest'
elif interpolation_method == 2:
method = 'cubic'
else:
raise Exception("Invalid interpolation method=%d"%interpolation_method)
XX_FOOTPRINT_old = XX_FOOTPRINT.copy()
YY_FOOTPRINT_old = YY_FOOTPRINT.copy()
if xrange is None:
XX_FOOTPRINT_MAX = numpy.max( (-XX_FOOTPRINT_old.min(), XX_FOOTPRINT_old.max()))
xx_footprint = numpy.linspace(-XX_FOOTPRINT_MAX, XX_FOOTPRINT_MAX, nx)
else:
xx_footprint = numpy.linspace(xrange[0], xrange[1], nx)
if yrange is None:
YY_FOOTPRINT_MAX = numpy.max( (-YY_FOOTPRINT_old.min(), YY_FOOTPRINT_old.max()))
yy_footprint = numpy.linspace(-YY_FOOTPRINT_MAX, YY_FOOTPRINT_MAX, ny)
else:
yy_footprint = numpy.linspace(yrange[0], yrange[1], ny)
XX_FOOTPRINT = numpy.outer(xx_footprint,numpy.ones_like(yy_footprint))
YY_FOOTPRINT = numpy.outer(numpy.ones_like(xx_footprint), yy_footprint)
from scipy import interpolate
tmptmp1 = trapezoidal_rule_2d(power_density_footprint)
power_density_footprint = interpolate.griddata(
(XX_FOOTPRINT_old.flatten(), YY_FOOTPRINT_old.flatten()),
power_density_footprint.flatten(),
(XX_FOOTPRINT, YY_FOOTPRINT), method=method, fill_value=0.0, rescale=True)
if renormalize_integrals:
tmptmp2 = trapezoidal_rule_2d(power_density_footprint)
print(">> integral before interpolation: %f, integral after: %f" % (tmptmp1, tmptmp2))
power_density_footprint *= tmptmp1 / tmptmp2
print(">> Renormalized to match integral after with integral before interpolation")
return power_density_footprint, XX_FOOTPRINT, YY_FOOTPRINT
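# --- Hedged self-check sketch (editor's addition, not in the original module) ---
# Regrids a Gaussian sampled on a mildly sheared grid onto a regular one; with
# renormalize_integrals=True the function rescales the regridded map so that its
# integral matches the input map's integral (both values are printed).  The grid
# sizes and the shear factor below are illustrative assumptions.
def _example_interpolate_to_regular_grid():
    x = numpy.linspace(-3, 3, 61)
    y = numpy.linspace(-2, 2, 41)
    XX = numpy.outer(x, numpy.ones_like(y))
    YY = numpy.outer(numpy.ones_like(x), y)
    XX_sheared = XX + 0.1 * YY                   # mild distortion of the sampling grid
    Z = numpy.exp(-(XX_sheared ** 2 + YY ** 2))
    Zr, XXr, YYr = interpolate_to_regular_grid(Z, XX_sheared, YY, nx=121, ny=81)
    print("regridded map shape:", Zr.shape)
    return Zr, XXr, YYr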
def compute_power_density_footprint(dict1,
verbose=True,
interpolation_method=0,
ratio_pixels_0=1.0,
ratio_pixels_1=1.0,
flip_pixels_number=[0,0,0,0,0,0]):
shapeXY = (dict1["X"].size, dict1["Y"].size)
# now build maps for optical elements
if "OE_FOOTPRINT" in dict1.keys():
OE_FOOTPRINT = dict1["OE_FOOTPRINT"]
number_of_elements_traced = len(OE_FOOTPRINT)
else:
number_of_elements_traced = 0
print(">> compute_power_density_footprint: Number of raytraced elements: %d"%number_of_elements_traced)
POWER_DENSITY_FOOTPRINT = []
POWER_DENSITY_FOOTPRINT_H = []
POWER_DENSITY_FOOTPRINT_V = []
for element_index in range(number_of_elements_traced):
XX_FOOTPRINT = OE_FOOTPRINT[element_index][1, :].reshape(shapeXY)
YY_FOOTPRINT = OE_FOOTPRINT[element_index][0, :].reshape(shapeXY)
power_density_footprint = dict1["Zlist"][element_index] - dict1["Zlist"][element_index+1]
if flip_pixels_number[element_index]:
ny = int(shapeXY[0]*ratio_pixels_0)
nx = int(shapeXY[1]*ratio_pixels_1)
else:
nx = int(shapeXY[0]*ratio_pixels_0)
ny = int(shapeXY[1]*ratio_pixels_1)
power_density_footprint, XX_FOOTPRINT, YY_FOOTPRINT = \
interpolate_to_regular_grid(
power_density_footprint, XX_FOOTPRINT, YY_FOOTPRINT,
nx=nx,ny=ny,
interpolation_method=interpolation_method)
POWER_DENSITY_FOOTPRINT.append( power_density_footprint )
POWER_DENSITY_FOOTPRINT_H.append(XX_FOOTPRINT)
POWER_DENSITY_FOOTPRINT_V.append(YY_FOOTPRINT)
dict1["POWER_DENSITY_FOOTPRINT"] = POWER_DENSITY_FOOTPRINT
dict1["POWER_DENSITY_FOOTPRINT_H"] = POWER_DENSITY_FOOTPRINT_H
dict1["POWER_DENSITY_FOOTPRINT_V"] = POWER_DENSITY_FOOTPRINT_V
if verbose:
for i in range(len(dict1["POWER_DENSITY_FOOTPRINT"])):
print(">>3>> out_dictionary: POWER_DENSITY_FOOTPRINT : ", i, dict1["POWER_DENSITY_FOOTPRINT"][i].shape)
print(">>3>> out_dictionary: POWER_DENSITY_FOOTPRINT_H: ", i, dict1["POWER_DENSITY_FOOTPRINT_H"][i].shape)
print(">>3>> out_dictionary: POWER_DENSITY_FOOTPRINT_V: ", i, dict1["POWER_DENSITY_FOOTPRINT_V"][i].shape)
print("\n")
return dict1
def compute_power_density_image(dict1,
verbose=True,
interpolation_or_histogramming=False,
interpolation_method=0,
ratio_pixels_0=1.0,
ratio_pixels_1=1.0,
flip_pixels_number=[0,0,0,0,0,0]):
shapeXY = (dict1["X"].size, dict1["Y"].size)
try:
OE_IMAGE = dict1["OE_IMAGE"]
number_of_elements_traced = len(OE_IMAGE)
except:
number_of_elements_traced = 0
print(">> compute_power_density_image: Number of raytraced elements: %d"%number_of_elements_traced)
POWER_DENSITY_IMAGE = []
POWER_DENSITY_IMAGE_H = []
POWER_DENSITY_IMAGE_V = []
for element_index in range(number_of_elements_traced):
tmptmp1 = trapezoidal_rule_2d(dict1["Zlist"][element_index+1])
image_H = OE_IMAGE[element_index][0, :]
image_V = OE_IMAGE[element_index][1, :]
weights = OE_IMAGE[element_index][2, :]
f = open("tmp.%02d" % (element_index + 1), 'w')
for i in range(image_V.size):
f.write("%g %g %g\n" % (image_H[i], image_V[i], weights[i]))
f.close()
print("File tmp.%02d written to disk" % (element_index + 1))
# calculate limits
image_H_max = numpy.max( (numpy.abs(image_H.min()), numpy.abs(image_H.max())) )
image_V_max = numpy.max( (numpy.abs(image_V.min()), numpy.abs(image_V.max())) )
if flip_pixels_number[element_index]:
ny = int(shapeXY[0]*ratio_pixels_0)
nx = int(shapeXY[1]*ratio_pixels_1)
else:
nx = int(shapeXY[0]*ratio_pixels_0)
ny = int(shapeXY[1]*ratio_pixels_1)
if interpolation_or_histogramming:
# make histograms for image
(hh,xx,yy) = numpy.histogram2d(image_H, image_V,
bins=[nx,ny], #[100,100], #2*nx0,2*ny0],
range=[[-image_H_max,image_H_max],[-image_V_max,image_V_max]],
                                  density=False,  # numpy removed 'normed'; density=False is equivalent
weights=weights)
bin_h_left = numpy.delete(xx,-1)
bin_v_left = numpy.delete(yy,-1)
bin_h_right = numpy.delete(xx,0)
bin_v_right = numpy.delete(yy,0)
bin_h_center = 0.5 * (bin_h_left + bin_h_right)
bin_v_center = 0.5 * (bin_v_left + bin_v_right)
xx_image = bin_h_center
yy_image = bin_v_center
power_density_image = hh
# prepare outputs
XX_IMAGE = numpy.outer(xx_image, numpy.ones_like(yy_image))
YY_IMAGE = numpy.outer(numpy.ones_like(xx_image), yy_image)
else:
nruns = int(image_H.size / (shapeXY[0] * shapeXY[1]))
weights_splitted = numpy.split(weights, nruns)
image_H_splitted = numpy.split(image_H, nruns)
image_V_splitted = numpy.split(image_V, nruns)
power_density_image = numpy.zeros((nx, ny))
for i in range(nruns):
power_density_image_i, XX_IMAGE, YY_IMAGE = \
interpolate_to_regular_grid(
weights_splitted[i].reshape(shapeXY),
image_H_splitted[i].reshape(shapeXY),
image_V_splitted[i].reshape(shapeXY),
xrange=[-image_H_max,image_H_max],
yrange=[-image_V_max,image_V_max],
renormalize_integrals=False,
interpolation_method=interpolation_method,
nx=nx,
ny=ny,
)
area_factor = image_H_max * image_V_max /\
(numpy.abs(image_H_splitted[i]).max() * \
numpy.abs(image_V_splitted[i]).max())
power_density_image += power_density_image_i * area_factor
tmptmp2 = trapezoidal_rule_2d(power_density_image)
power_density_image *= tmptmp1 / tmptmp2
print(">> oe %d: integral before interpolation/histograming %f and after: %f" % (element_index+1,tmptmp1, tmptmp2))
print(">> Renormalized to match integral after with integral before interpolation/histogramming")
POWER_DENSITY_IMAGE.append(power_density_image)
POWER_DENSITY_IMAGE_H.append(XX_IMAGE)
POWER_DENSITY_IMAGE_V.append(YY_IMAGE)
dict1["POWER_DENSITY_IMAGE"] = POWER_DENSITY_IMAGE
dict1["POWER_DENSITY_IMAGE_H"] = POWER_DENSITY_IMAGE_H
dict1["POWER_DENSITY_IMAGE_V"] = POWER_DENSITY_IMAGE_V
if verbose:
for i in range(len(dict1["POWER_DENSITY_FOOTPRINT"])):
print(">>3>> out_dictionary: POWER_DENSITY_IMAGE: ", i, dict1["POWER_DENSITY_IMAGE"][i].shape)
print(">>3>> out_dictionary: POWER_DENSITY_IMAGE_H: ", i, dict1["POWER_DENSITY_IMAGE_H"][i].shape)
print(">>3>> out_dictionary: POWER_DENSITY_IMAGE_V: ", i, dict1["POWER_DENSITY_IMAGE_V"][i].shape)
print("\n")
return dict1
def trapezoidal_rule_2d(data2D,H=None,V=None):
if H is None:
HH = numpy.arange(data2D.shape[0])
else:
HH = H[:, 0]
if V is None:
VV = numpy.arange(data2D.shape[1])
else:
VV = V[0, :]
totPower2 = numpy.trapz(data2D, VV, axis=1)
totPower2 = numpy.trapz(totPower2, HH, axis=0)
return totPower2
def trapezoidal_rule_2d_1darrays(data2D,h=None,v=None):
if h is None:
h = numpy.arange(data2D.shape[0])
if v is None:
v = numpy.arange(data2D.shape[1])
totPower2 = numpy.trapz(data2D, v, axis=1)
totPower2 = numpy.trapz(totPower2, h, axis=0)
return totPower2
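# --- Hedged worked check (editor's addition, not in the original module) ---
# The iterated trapezoidal rule is exact for functions linear in each variable,
# e.g. f(x, y) = x * y on the unit square integrates to exactly 0.25.
def _example_trapezoidal_rule_2d_1darrays():
    h = numpy.linspace(0, 1, 11)
    v = numpy.linspace(0, 1, 21)
    data2D = numpy.outer(h, v)  # f(h, v) = h * v sampled on the grid
    total = trapezoidal_rule_2d_1darrays(data2D, h, v)
    print("integral of x*y over the unit square: %f (expected 0.25)" % total)
    return total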
def debug_plot_3d(zs,xs,ys,title=""):
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
self_axis = fig.add_subplot(111, projection='3d')
# For each set of style and range settings, plot n random points in the box
# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].
# for m, zlow, zhigh in [('o', -50, -25), ('^', -30, -5)]:
for m, zlow, zhigh in [('o', zs.min(), zs.max())]:
self_axis.scatter(xs, ys, zs, marker=m)
self_axis.set_xlabel('X [mm]')
self_axis.set_ylabel('Y [mm]')
self_axis.set_zlabel('Z [um]')
self_axis.set_title(title)
plt.show()
def write_ansys_files(absorbed2d, H, V, oe_number=1,is_image=False):
if is_image:
fim = "_image"
else:
fim = ""
filename="idpower_fea_3columns_element%d%s.txt" % (oe_number, fim)
f = open(filename, 'w')
for i in range(H.size):
for j in range(V.size):
f.write("%g, %g, %g\n" % (H[i]*1e-3, V[j]*1e-3, absorbed2d[i,j]*1e6))
f.close()
print("File written to disk: %s" % filename)
#
filename="idpower_fea_matrix_element%d%s.txt" % (oe_number, fim)
f = open(filename, 'w')
f.write("%10.5g" % 0)
for i in range(H.size):
f.write(", %10.5g" % (H[i] * 1e-3))
f.write("\n")
for j in range(V.size):
f.write("%10.5g" % (V[j] * 1e-3))
for i in range(H.size):
f.write(", %10.5g" % (absorbed2d[i,j] * 1e6))
f.write("\n")
f.close()
print("File written to disk: %s" % filename)
if __name__ == "__main__":
path = "C:/Users/Manuel/OASYS1.2/OASYS1-ALS-ShadowOui/orangecontrib/xoppy/als/widgets/srcalc/"
    test = 3  # 0 = widget, 1 = load + ray tracing + footprint, 2 = ray tracing scatter plots, 3 = integral checks
if test == 0:
pass
elif test == 1:
dict1 = load_srcalc_output_file(filename=path+"D_IDPower.TXT", skiprows=5, do_plot=0)
dict2 = ray_tracing(dict1)
dict3 = compute_power_density_footprint(dict2)
elif test == 2:
dict1 = load_srcalc_output_file(filename=path+"D_IDPower.TXT", skiprows=5, do_plot=0)
dict2 = ray_tracing(dict1)
OE_FOOTPRINT = dict2["OE_FOOTPRINT"]
OE_IMAGE = dict2["OE_IMAGE"]
print(OE_FOOTPRINT[0].shape)
from srxraylib.plot.gol import plot_scatter
plot_scatter(OE_FOOTPRINT[0][0,:],OE_FOOTPRINT[0][1,:],plot_histograms=False,title="Footprint",show=False)
plot_scatter(OE_IMAGE[0][0, :], OE_IMAGE[0][1, :], plot_histograms=False,title="Image")
elif test == 3:
a = numpy.loadtxt(path+"D_IDPower.TXT",skiprows=5)
nX = 31 # intervals - URGENT input
nY = 21 # intervals - URGENT input
npointsX = nX + 1
npointsY = nY + 1
print("Dimensions ", a.shape, npointsX * npointsY)
stepX = 30.0 / 2 / nX
stepY = 15.0 / 2 / nY
X = numpy.linspace(0, 30.0 / 2, nX + 1)
Y = numpy.linspace(0, 15.0 / 2, nY + 1)
print("X, Y: ",X.shape,Y.shape)
totPower1 = 4 * trapezoidal_rule_2d_1darrays((a[:, 0]).copy().reshape((npointsX, npointsY)), X, Y)
totPower2 = 4 * trapezoidal_rule_2d_1darrays((a[:, 1]).copy().reshape((npointsX, npointsY)), X, Y)
totPower3 = 4 * trapezoidal_rule_2d_1darrays((a[:, 2]).copy().reshape((npointsX, npointsY)), X, Y)
print("Sum for column 1: ", totPower1)
print("Sum for column 2: ", totPower2)
print("Sum for column 3: ", totPower3)
dict1 = load_srcalc_output_file(filename=path+"D_IDPower.TXT", skiprows=5,
four_quadrants=True,
do_plot=False,
verbose=True)
totPower11 = trapezoidal_rule_2d_1darrays(dict1["Zlist"][0])
totPower22 = trapezoidal_rule_2d_1darrays(dict1["Zlist"][1])
totPower33 = trapezoidal_rule_2d_1darrays(dict1["Zlist"][2])
print("Sum for element 1: ", totPower11)
print("Sum for element 2: ", totPower22)
print("Sum for element 3: ", totPower33)
|
bsd-2-clause
|
bsipocz/statsmodels
|
statsmodels/base/tests/test_shrink_pickle.py
|
19
|
9117
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 09 16:00:27 2012
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import iterkeys, cPickle, BytesIO
import numpy as np
import statsmodels.api as sm
import pandas as pd
from numpy.testing import assert_
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = np.__version__ < '1.5'
winoldnp = iswin & npversionless15
def check_pickle(obj):
fh = BytesIO()
cPickle.dump(obj, fh, protocol=cPickle.HIGHEST_PROTOCOL)
plen = fh.tell()
fh.seek(0, 0)
res = cPickle.load(fh)
fh.close()
return res, plen
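def _example_check_pickle():
    # Hedged usage sketch (editor's addition, not part of the statsmodels test
    # suite): round-trip a plain object and report the pickle length in bytes.
    obj = {"a": 1, "b": [1, 2, 3]}
    res, plen = check_pickle(obj)
    assert_(res == obj)
    return plen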
class RemoveDataPickle(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
    def setup_class(cls):
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
        cls.exog = x
        cls.xf = 0.25 * np.ones((2, 4))
        cls.l_max = 20000
def test_remove_data_pickle(self):
if winoldnp:
raise SkipTest
results = self.results
xf = self.xf
pred_kwds = self.predict_kwds
pred1 = results.predict(xf, **pred_kwds)
#create some cached attributes
results.summary()
res = results.summary2() # SMOKE test also summary2
# uncomment the following to check whether tests run (7 failures now)
#np.testing.assert_equal(res, 1)
#check pickle unpickle works on full results
#TODO: drop of load save is tested
res, l = check_pickle(results._results)
#remove data arrays, check predict still works
results.remove_data()
pred2 = results.predict(xf, **pred_kwds)
np.testing.assert_equal(pred2, pred1)
#pickle, unpickle reduced array
res, l = check_pickle(results._results)
#for testing attach res
self.res = res
#Note: l_max is just a guess for the limit on the length of the pickle
l_max = self.l_max
assert_(l < l_max, msg='pickle length not %d < %d' % (l, l_max))
pred3 = results.predict(xf, **pred_kwds)
np.testing.assert_equal(pred3, pred1)
def test_remove_data_docstring(self):
assert_(self.results.remove_data.__doc__ is not None)
def test_pickle_wrapper(self):
fh = BytesIO() # use cPickle with binary content
# test unwrapped results load save pickle
self.results._results.save(fh)
fh.seek(0, 0)
res_unpickled = self.results._results.__class__.load(fh)
assert_(type(res_unpickled) is type(self.results._results))
# test wrapped results load save
fh.seek(0, 0)
self.results.save(fh)
fh.seek(0, 0)
res_unpickled = self.results.__class__.load(fh)
fh.close()
# print type(res_unpickled)
assert_(type(res_unpickled) is type(self.results))
before = sorted(iterkeys(self.results.__dict__))
after = sorted(iterkeys(res_unpickled.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results._results.__dict__))
after = sorted(iterkeys(res_unpickled._results.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results.model.__dict__))
after = sorted(iterkeys(res_unpickled.model.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results._cache))
after = sorted(iterkeys(res_unpickled._cache))
assert_(before == after, msg='not equal %r and %r' % (before, after))
class TestRemoveDataPickleOLS(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestRemoveDataPickleWLS(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestRemoveDataPicklePoisson(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestRemoveDataPickleNegativeBinomial(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
self.results = mod.fit(disp=0)
class TestRemoveDataPickleLogit(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestRemoveDataPickleRLM(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestRemoveDataPickleGLM(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestPickleFormula(RemoveDataPickle):
@classmethod
def setup_class(cls):
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
cls.exog = pd.DataFrame(x, columns=["A", "B", "C"])
cls.xf = pd.DataFrame(0.25 * np.ones((2, 3)),
columns=cls.exog.columns)
        cls.l_max = 900000  # have to pickle endog/exog to unpickle formula
def setup(self):
x = self.exog
np.random.seed(123)
y = x.sum(1) + np.random.randn(x.shape[0])
y = pd.Series(y, name="Y")
X = self.exog.copy()
X["Y"] = y
self.results = sm.OLS.from_formula("Y ~ A + B + C", data=X).fit()
class TestPickleFormula2(RemoveDataPickle):
@classmethod
def setup_class(cls):
nobs = 500
np.random.seed(987689)
data = np.random.randn(nobs, 4)
data[:,0] = data[:, 1:].sum(1)
cls.data = pd.DataFrame(data, columns=["Y", "A", "B", "C"])
cls.xf = pd.DataFrame(0.25 * np.ones((2, 3)),
columns=cls.data.columns[1:])
        cls.l_max = 900000  # have to pickle endog/exog to unpickle formula
def setup(self):
self.results = sm.OLS.from_formula("Y ~ A + B + C", data=self.data).fit()
class TestPickleFormula3(TestPickleFormula2):
def setup(self):
self.results = sm.OLS.from_formula("Y ~ A + B * C", data=self.data).fit()
class TestPickleFormula4(TestPickleFormula2):
def setup(self):
self.results = sm.OLS.from_formula("Y ~ np.log(A) + B * C", data=self.data).fit()
# we need log in module namespace for the following test
from numpy import log
class TestPickleFormula5(TestPickleFormula2):
def setup(self):
# if we import here, then unpickling fails -> exception in test
#from numpy import log
self.results = sm.OLS.from_formula("Y ~ log(A) + B * C", data=self.data).fit()
if __name__ == '__main__':
for cls in [TestRemoveDataPickleOLS, TestRemoveDataPickleWLS,
TestRemoveDataPicklePoisson,
TestRemoveDataPickleNegativeBinomial,
TestRemoveDataPickleLogit, TestRemoveDataPickleRLM,
TestRemoveDataPickleGLM]:
print(cls)
cls.setup_class()
tt = cls()
tt.setup()
tt.test_remove_data_pickle()
tt.test_remove_data_docstring()
tt.test_pickle_wrapper()
|
bsd-3-clause
|
wzbozon/scikit-learn
|
examples/preprocessing/plot_robust_scaling.py
|
221
|
2702
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # transform only: keep the statistics fitted on the training data
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
|
bsd-3-clause
|
walterreade/scikit-learn
|
sklearn/metrics/cluster/bicluster.py
|
359
|
2797
|
from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
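if __name__ == "__main__":
    # Hedged usage sketch (editor's addition, not part of scikit-learn): two
    # identical sets of two biclusters score 1.0; the boolean row/column
    # indicator layout below is purely illustrative.
    rows = np.array([[True, True, False, False],
                     [False, False, True, True]])
    cols = np.array([[True, False, True, False],
                     [False, True, False, True]])
    print(consensus_score((rows, cols), (rows, cols)))  # expected: 1.0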
|
bsd-3-clause
|
tosolveit/scikit-learn
|
examples/model_selection/plot_roc.py
|
96
|
4487
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
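##############################################################################
# Hedged cross-check (editor's addition, not part of the original example):
# roc_auc_score computes the micro-averaged area directly from the label
# indicator matrix and the scores, and should agree with the curve built above.
from sklearn.metrics import roc_auc_score
print("micro-average ROC AUC via roc_auc_score: {0:0.2f}".format(
    roc_auc_score(y_test, y_score, average="micro")))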
|
bsd-3-clause
|
plowman/python-mcparseface
|
models/syntaxnet/tensorflow/tensorflow/examples/skflow/digits.py
|
3
|
2390
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn import monitors
# Load dataset
digits = datasets.load_digits()
X = digits.images
y = digits.target
# Split it into train / test subsets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2,
random_state=42)
# Split X_train again to create validation data
X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train,
y_train,
test_size=0.2,
random_state=42)
# TensorFlow model using Scikit Flow ops
def conv_model(X, y):
X = tf.expand_dims(X, 3)
features = tf.reduce_max(learn.ops.conv2d(X, 12, [3, 3]), [1, 2])
features = tf.reshape(features, [-1, 12])
return learn.models.logistic_regression(features, y)
val_monitor = monitors.ValidationMonitor(X_val, y_val, n_classes=10, print_steps=50)
# Create a classifier, train and predict.
classifier = learn.TensorFlowEstimator(model_fn=conv_model, n_classes=10,
steps=1000, learning_rate=0.05,
batch_size=128)
classifier.fit(X_train, y_train, val_monitor)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
|
apache-2.0
|
kipohl/ncanda-data-integration
|
scripts/reporting/check_gradient_tables.py
|
4
|
6786
|
#!/usr/bin/env python
import os
import sys
import glob
import json
import numpy as np
import pandas as pd
from lxml import objectify
def read_xml_sidecar(filepath):
"""
Read a CMTK xml sidecar file.
Returns
=======
lxml.objectify
"""
abs_path = os.path.abspath(filepath)
    with open(abs_path, 'r') as fi:  # text mode so the inserted '<root>' tags join cleanly
lines = fi.readlines()
lines.insert(1, '<root>')
lines.append('</root>')
string = ''.join(lines)
strip_ge = string.replace('dicom:GE:', '')
strip_dicom = strip_ge.replace('dicom:','')
result = objectify.fromstring(strip_dicom)
return result
def get_array(array_string):
"""
Parse an array from XML string
Returns
=======
np.array
"""
l = array_string.text.split(' ')
    return np.fromiter(l, float)  # builtin float: np.float was removed from recent numpy
def get_gradient_table(parsed_sidecar, decimals=None):
"""
Get the bvector table for a single image
Returns
=======
    np.array rounded to the requested number of decimals (default 1)
"""
b_vector = get_array(parsed_sidecar.mr.dwi.bVector)
b_vector_image = get_array(parsed_sidecar.mr.dwi.bVectorImage)
b_vector_standard = get_array(parsed_sidecar.mr.dwi.bVectorStandard)
    if decimals is None:
        decimals = 1
return np.around([b_vector,
b_vector_image,
b_vector_standard],
decimals=decimals)
def get_cases(cases_root, case=None):
"""
Get a list of cases from root dir, optionally for a single case
"""
match = 'NCANDA_S*'
if case:
match = case
return glob.glob(os.path.join(cases_root, match))
def get_dti_stack(case, arm=None, event=None):
if arm:
path = os.path.join(case, arm)
else:
path = os.path.join(case, '*')
if event:
path = os.path.join(path, event)
else:
path = os.path.join(path,'*')
path = os.path.join(path, 'diffusion/native/dti60b1000/*.xml')
return glob.glob(path)
def get_all_gradients(dti_stack, decimals=None):
"""
Parses a list of dti sidecar files for subject.
Returns
=======
list of np.array
"""
    gradients_per_frame = list()
    for xml in dti_stack:
        sidecar = read_xml_sidecar(xml)
        gradients_per_frame.append(get_gradient_table(sidecar,
                                                      decimals=decimals))
    return gradients_per_frame
def get_site_scanner(site):
"""
Returns the "ground truth" case for gradients.
"""
site_scanner = dict(A='Siemens',
B='GE',
C='GE',
D='Siemens',
E='GE')
return site_scanner.get(site)
def get_ground_truth_gradients(args=None):
"""
    Return a dictionary mapping scanner vendor to its ground-truth gradient tables.
"""
# Choose arbitrary cases for ground truth
test_path = '/fs/ncanda-share/pipeline/cases'
scanner_subject = dict(Siemens='NCANDA_S00061',
GE='NCANDA_S00033')
# Paths to scanner specific gradients
siemens_path = os.path.join(test_path, scanner_subject.get('Siemens'))
ge_path = os.path.join(test_path, scanner_subject.get('GE'))
# Get ground truth for standard baseline
test_arm = 'standard'
test_event = 'baseline'
# Gets files for each scanner
siemens_stack = get_dti_stack(siemens_path, arm=test_arm, event=test_event)
ge_stack = get_dti_stack(ge_path, arm=test_arm, event=test_event)
siemens_stack.sort()
ge_stack.sort()
# Parse the xml files to get scanner specific gradients per frame
siemens_gradients = get_all_gradients(siemens_stack, decimals=args.decimals)
ge_gradients = get_all_gradients(ge_stack, decimals=args.decimals)
return dict(Siemens=siemens_gradients, GE=ge_gradients)
def main(args=None):
# Get the gradient tables for all cases and compare to ground truth
cases = get_cases(args.base_dir, case=args.case)
# Demographics from pipeline to grab case to scanner mapping
demo_path = '/fs/ncanda-share/pipeline/summaries/demographics.csv'
demographics = pd.read_csv(demo_path, index_col=['subject',
'arm',
'visit'])
gradient_map = get_ground_truth_gradients(args=args)
for case in cases:
if args.verbose:
print("Processing: {}".format(case))
# Get the case's site
sid = os.path.basename(case)
site = demographics.loc[sid, args.arm, args.event].site
scanner = get_site_scanner(site)
gradients = gradient_map.get(scanner)
case_dti = os.path.join(args.base_dir, case)
case_stack = get_dti_stack(case_dti, arm=args.arm, event=args.event)
case_stack.sort()
case_gradients = get_all_gradients(case_stack, decimals=args.decimals)
errors = list()
for idx, frame in enumerate(case_gradients):
# if there is a frame that doesn't match, report it.
if not (gradients[idx]==frame).all():
errors.append(idx)
if errors:
key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
result = dict(subject_site_id=key,
frames=errors,
error="Gradient tables do not match for frames.")
print(json.dumps(result, sort_keys=True))
if __name__ == '__main__':
import argparse
formatter = argparse.RawDescriptionHelpFormatter
default = 'default: %(default)s'
parser = argparse.ArgumentParser(prog="check_gradient_tables.py",
description=__doc__,
formatter_class=formatter)
parser.add_argument('-a', '--arm', dest="arm",
help="Study arm. {}".format(default),
default='standard')
parser.add_argument('-b', '--base-dir', dest="base_dir",
help="Study base directory. {}".format(default),
default='/fs/ncanda-share/pipeline/cases')
parser.add_argument('-d', '--decimals', dest="decimals",
help="Number of decimals. {}".format(default),
default=3)
parser.add_argument('-e', '--event', dest="event",
help="Study event. {}".format(default),
default='baseline')
parser.add_argument('-c', '--case', dest="case",
help="Study case. {}".format(default),
default=None)
parser.add_argument('-v', '--verbose', dest="verbose",
help="Turn on verbose", action='store_true')
argv = parser.parse_args()
sys.exit(main(args=argv))
|
bsd-3-clause
|
dvro/imbalanced-learn
|
imblearn/over_sampling/tests/test_smote.py
|
2
|
12097
|
"""Test the module SMOTE."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from imblearn.over_sampling import SMOTE
# Generate a global dataset to use
RND_SEED = 0
X = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234]])
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
def test_smote_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(SMOTE)
def test_smote_bad_ratio():
"""Test either if an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
smote = SMOTE(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, smote.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
smote = SMOTE(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, smote.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
smote = SMOTE(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, smote.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
smote = SMOTE(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, smote.fit, X, Y)
def test_smote_wrong_kind():
"""Test either if an error is raised when the wrong kind of SMOTE is
given."""
kind = 'rnd'
smote = SMOTE(kind=kind, random_state=RND_SEED)
assert_raises(ValueError, smote.fit_sample, X, Y)
def test_smote_fit_single_class():
"""Test either if an error when there is a single class"""
# Create the object
smote = SMOTE(random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, smote.fit, X, y_single_class)
def test_smote_fit():
"""Test the fitting method"""
# Create the object
smote = SMOTE(random_state=RND_SEED)
# Fit the data
smote.fit(X, Y)
# Check if the data information have been computed
assert_equal(smote.min_c_, 0)
assert_equal(smote.maj_c_, 1)
assert_equal(smote.stats_c_[0], 8)
assert_equal(smote.stats_c_[1], 12)
def test_smote_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Create the object
smote = SMOTE(random_state=RND_SEED)
assert_raises(RuntimeError, smote.sample, X, Y)
def test_sample_regular():
"""Test sample function with regular SMOTE."""
# Create the object
kind = 'regular'
smote = SMOTE(random_state=RND_SEED, kind=kind)
# Fit the data
smote.fit(X, Y)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.29307743, -0.14670439],
[0.84976473, -0.15570176],
[0.61319159, -0.11571668],
[0.66052536, -0.28246517]])
y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,
0, 0, 0, 0, 0])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_regular_half():
"""Test sample function with regular SMOTE and a ratio of 0.5."""
# Create the object
ratio = 0.8
kind = 'regular'
smote = SMOTE(ratio=ratio, random_state=RND_SEED, kind=kind)
# Fit the data
smote.fit(X, Y)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.36784496, -0.1953161]])
y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,
0, 0])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_borderline1():
"""Test sample function with borderline 1 SMOTE."""
# Create the object
kind = 'borderline1'
smote = SMOTE(random_state=RND_SEED, kind=kind)
# Fit the data
smote.fit(X, Y)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.3765279, -0.2009615],
[0.55276636, -0.10550373],
[0.45413452, -0.08883319],
[1.21118683, -0.22817957]])
y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,
0, 0, 0, 0, 0])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_borderline2():
"""Test sample function with borderline 2 SMOTE."""
# Create the object
kind = 'borderline2'
smote = SMOTE(random_state=RND_SEED, kind=kind)
# Fit the data
smote.fit(X, Y)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.47436888, -0.2645749],
[1.07844561, -0.19435291],
[0.33339622, 0.49870937]])
y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,
0, 0, 0, 0])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_svm():
"""Test sample function with SVM SMOTE."""
# Create the object
kind = 'svm'
smote = SMOTE(random_state=RND_SEED, kind=kind)
# Fit the data
smote.fit(X, Y)
X_resampled, y_resampled = smote.fit_sample(X, Y)
X_gt = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
[0.47436888, -0.2645749],
[1.07844561, -0.19435291],
[1.44015515, -1.30621303]])
y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1,
0, 0, 0, 0])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
sm = SMOTE(random_state=RND_SEED)
sm.fit(X, Y)
assert_raises(RuntimeError, sm.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_multiclass_error():
""" Test either if an error is raised when the target are not binary
type. """
# continuous case
y = np.linspace(0, 1, 20)
sm = SMOTE(random_state=RND_SEED)
assert_warns(UserWarning, sm.fit, X, y)
# multiclass case
y = np.array([0] * 3 + [1] * 2 + [2] * 15)
sm = SMOTE(random_state=RND_SEED)
assert_warns(UserWarning, sm.fit, X, y)
|
mit
|
jjx02230808/project0223
|
examples/linear_model/plot_sgd_loss_functions.py
|
73
|
1232
|
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
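# Hedged sanity check (editor's addition, not part of the original example): the
# modified Huber loss is 0 for margins z >= 1, (1 - z)^2 for -1 <= z < 1 and the
# linear -4*z below z = -1.
print(modified_huber_loss(np.array([1., 1., 1.]), np.array([2., 0., -2.])))
# expected: [ 0.  1.  8.]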
|
bsd-3-clause
|
victorbergelin/scikit-learn
|
examples/cluster/plot_mean_shift.py
|
351
|
1793
|
"""
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
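###############################################################################
# Hedged follow-up sketch (editor's addition, not part of the original example):
# the quantile passed to estimate_bandwidth controls the bandwidth and therefore
# how many clusters mean shift finds.
for q in (0.1, 0.2, 0.3):
    print("quantile=%.1f -> bandwidth=%.3f"
          % (q, estimate_bandwidth(X, quantile=q, n_samples=500)))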
|
bsd-3-clause
|
nixingyang/Kaggle-Face-Verification
|
Cervical Cancer Screening/solution_classification_without_additional.py
|
1
|
15624
|
from __future__ import absolute_import, division, print_function
import matplotlib
matplotlib.use("Agg")
import os
import glob
import shutil
import pylab
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Input, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Nadam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.visualize_util import plot
from sklearn.model_selection import StratifiedKFold
from data_preprocessing import PROJECT_FOLDER_PATH
from data_preprocessing import PROCESSED_DATASET_FOLDER_PATH as DATASET_FOLDER_PATH
from data_preprocessing import PROCESSED_IMAGE_HEIGHT as IMAGE_HEIGHT
from data_preprocessing import PROCESSED_IMAGE_WIDTH as IMAGE_WIDTH
from solution_classification_with_additional import OPTIMAL_WEIGHTS_FOLDER_PATH as PREVIOUS_OPTIMAL_WEIGHTS_FOLDER_PATH
# Choose ResNet50 or InceptionV3 or VGG16
MODEL_NAME = "ResNet50" # "ResNet50" or "InceptionV3" or "VGG16"
if MODEL_NAME == "ResNet50":
from keras.applications.resnet50 import preprocess_input as PREPROCESS_INPUT
from keras.applications.resnet50 import ResNet50 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "activation_40"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00001
elif MODEL_NAME == "InceptionV3":
from keras.applications.inception_v3 import preprocess_input as PREPROCESS_INPUT
from keras.applications.inception_v3 import InceptionV3 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "mixed8"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00001
elif MODEL_NAME == "VGG16":
from keras.applications.vgg16 import preprocess_input as PREPROCESS_INPUT
from keras.applications.vgg16 import VGG16 as INIT_FUNC
BOTTLENECK_LAYER_NAME = "block4_pool"
DROPOUT_RATIO = 0.5
LEARNING_RATE = 0.00005
else:
assert False
# Dataset
TRAIN_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "train")
TEST_FOLDER_PATH = os.path.join(DATASET_FOLDER_PATH, "test")
# Workspace
WORKSPACE_FOLDER_PATH = os.path.join("/tmp", os.path.basename(DATASET_FOLDER_PATH))
ACTUAL_DATASET_FOLDER_PATH = os.path.join(WORKSPACE_FOLDER_PATH, "actual_dataset")
ACTUAL_TRAIN_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "train")
ACTUAL_VALID_FOLDER_PATH = os.path.join(ACTUAL_DATASET_FOLDER_PATH, "valid")
# Output
OUTPUT_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "phase_2")
OPTIMAL_WEIGHTS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "optimal weights")
SUBMISSION_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "submission")
TRIAL_NUM = 10
# Training and Testing procedure
SPLIT_NUM = 5
MAXIMUM_EPOCH_NUM = 1000
PATIENCE = 4
BATCH_SIZE = 32
SEED = 0
def reformat_testing_dataset():
# Create a dummy folder
dummy_test_folder_path = os.path.join(TEST_FOLDER_PATH, "dummy")
os.makedirs(dummy_test_folder_path, exist_ok=True)
# Move files to the dummy folder if needed
file_path_list = glob.glob(os.path.join(TEST_FOLDER_PATH, "*"))
for file_path in file_path_list:
if os.path.isfile(file_path):
shutil.move(file_path, os.path.join(dummy_test_folder_path, os.path.basename(file_path)))
def reorganize_dataset(original_image_path_list, train_index_array, valid_index_array):
# Create symbolic links
shutil.rmtree(ACTUAL_DATASET_FOLDER_PATH, ignore_errors=True)
for folder_path, index_array in zip((ACTUAL_TRAIN_FOLDER_PATH, ACTUAL_VALID_FOLDER_PATH), (train_index_array, valid_index_array)):
for index_value in index_array:
original_image_path = original_image_path_list[index_value]
path_suffix = original_image_path[len(TRAIN_FOLDER_PATH):]
actual_original_image_path = folder_path + path_suffix
os.makedirs(os.path.abspath(os.path.join(actual_original_image_path, os.pardir)), exist_ok=True)
os.symlink(original_image_path, actual_original_image_path)
return len(glob.glob(os.path.join(ACTUAL_TRAIN_FOLDER_PATH, "*/*"))), len(glob.glob(os.path.join(ACTUAL_VALID_FOLDER_PATH, "*/*")))
def init_model(image_height, image_width, unique_label_num, init_func=INIT_FUNC, bottleneck_layer_name=BOTTLENECK_LAYER_NAME, dropout_ratio=DROPOUT_RATIO, learning_rate=LEARNING_RATE):
def set_model_trainable_properties(model, trainable, bottleneck_layer_name):
for layer in model.layers:
layer.trainable = trainable
if layer.name == bottleneck_layer_name:
break
def get_feature_extractor(input_shape):
feature_extractor = init_func(include_top=False, weights="imagenet", input_shape=input_shape)
set_model_trainable_properties(model=feature_extractor, trainable=False, bottleneck_layer_name=bottleneck_layer_name)
return feature_extractor
def get_dense_classifier(input_shape, unique_label_num):
input_tensor = Input(shape=input_shape)
output_tensor = GlobalAveragePooling2D()(input_tensor)
output_tensor = Dropout(dropout_ratio)(output_tensor)
output_tensor = Dense(unique_label_num, activation="softmax")(output_tensor)
model = Model(input_tensor, output_tensor)
return model
# Initiate the input tensor
if K.image_dim_ordering() == "tf":
input_tensor = Input(shape=(image_height, image_width, 3))
else:
input_tensor = Input(shape=(3, image_height, image_width))
# Define the feature extractor
feature_extractor = get_feature_extractor(input_shape=K.int_shape(input_tensor)[1:])
output_tensor = feature_extractor(input_tensor)
# Define the dense classifier
dense_classifier = get_dense_classifier(input_shape=feature_extractor.output_shape[1:], unique_label_num=unique_label_num)
output_tensor = dense_classifier(output_tensor)
# Define the overall model
model = Model(input_tensor, output_tensor)
model.compile(optimizer=Nadam(lr=learning_rate), loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Plot the model structures
plot(feature_extractor, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_feature_extractor.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
plot(dense_classifier, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_dense_classifier.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
plot(model, to_file=os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "{}_model.png".format(MODEL_NAME)), show_shapes=True, show_layer_names=True)
# Load weights from previous phase
previous_optimal_weights_file_path = os.path.join(PREVIOUS_OPTIMAL_WEIGHTS_FOLDER_PATH, "{}.h5".format(MODEL_NAME))
assert os.path.isfile(previous_optimal_weights_file_path), "Could not find file {}!".format(previous_optimal_weights_file_path)
print("Loading weights from {} ...".format(previous_optimal_weights_file_path))
model.load_weights(previous_optimal_weights_file_path)
return model
def load_dataset(folder_path, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), classes=None, class_mode=None, batch_size=BATCH_SIZE, shuffle=True, seed=None, preprocess_input=PREPROCESS_INPUT):
# Get the generator of the dataset
data_generator_object = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
vertical_flip=True,
preprocessing_function=lambda sample: preprocess_input(np.array([sample]))[0])
data_generator = data_generator_object.flow_from_directory(
directory=folder_path,
target_size=target_size,
color_mode="rgb",
classes=classes,
class_mode=class_mode,
batch_size=batch_size,
shuffle=shuffle,
seed=seed)
return data_generator
class InspectLossAccuracy(Callback):
def __init__(self):
super(InspectLossAccuracy, self).__init__()
self.train_loss_list = []
self.valid_loss_list = []
self.train_acc_list = []
self.valid_acc_list = []
def on_epoch_end(self, epoch, logs=None):
# Loss
train_loss = logs.get("loss")
valid_loss = logs.get("val_loss")
self.train_loss_list.append(train_loss)
self.valid_loss_list.append(valid_loss)
epoch_index_array = np.arange(len(self.train_loss_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_loss_list, "yellowgreen", label="train_loss")
pylab.plot(epoch_index_array, self.valid_loss_list, "lightskyblue", label="valid_loss")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "{}_loss_curve.png".format(MODEL_NAME)))
pylab.close()
# Accuracy
train_acc = logs.get("acc")
valid_acc = logs.get("val_acc")
self.train_acc_list.append(train_acc)
self.valid_acc_list.append(valid_acc)
epoch_index_array = np.arange(len(self.train_acc_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array, self.train_acc_list, "yellowgreen", label="train_acc")
pylab.plot(epoch_index_array, self.valid_acc_list, "lightskyblue", label="valid_acc")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "{}_accuracy_curve.png".format(MODEL_NAME)))
pylab.close()
def ensemble_predictions(submission_folder_path):
def _ensemble_predictions(ensemble_func, ensemble_submission_file_name):
ensemble_proba = ensemble_func(proba_array, axis=0)
ensemble_proba = ensemble_proba / np.sum(ensemble_proba, axis=1)[:, np.newaxis]
ensemble_submission_file_content.loc[:, proba_columns] = ensemble_proba
ensemble_submission_file_content.to_csv(os.path.abspath(os.path.join(submission_folder_path, os.pardir, ensemble_submission_file_name)), index=False)
# Read predictions
submission_file_path_list = glob.glob(os.path.join(submission_folder_path, "*_Trial_*.csv"))
print("There are {} submissions in total.".format(len(submission_file_path_list)))
submission_file_content_list = [pd.read_csv(submission_file_path) for submission_file_path in submission_file_path_list]
ensemble_submission_file_content = submission_file_content_list[0]
# Concatenate predictions
proba_columns = ensemble_submission_file_content.columns[1:]
proba_list = [np.expand_dims(submission_file_content.as_matrix(proba_columns), axis=0)
for submission_file_content in submission_file_content_list]
proba_array = np.vstack(proba_list)
# Ensemble predictions
for ensemble_func, ensemble_submission_file_name in \
zip([np.max, np.min, np.mean, np.median], ["max.csv", "min.csv", "mean.csv", "median.csv"]):
_ensemble_predictions(ensemble_func, ensemble_submission_file_name)
def run():
print("Creating folders ...")
os.makedirs(OPTIMAL_WEIGHTS_FOLDER_PATH, exist_ok=True)
os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
print("Reformatting testing dataset ...")
reformat_testing_dataset()
print("Analyzing original dataset ...")
original_image_path_list = sorted(glob.glob(os.path.join(TRAIN_FOLDER_PATH, "*/*.jpg")))
original_image_label_list = [os.path.basename(os.path.abspath(os.path.join(original_image_path, os.pardir))) for original_image_path in original_image_path_list]
print("Getting the labels ...")
unique_label_list = sorted([folder_name for folder_name in os.listdir(TRAIN_FOLDER_PATH) if os.path.isdir(os.path.join(TRAIN_FOLDER_PATH, folder_name))])
print("Initializing model ...")
model = init_model(image_height=IMAGE_HEIGHT, image_width=IMAGE_WIDTH, unique_label_num=len(unique_label_list))
vanilla_weights = model.get_weights()
cv_object = StratifiedKFold(n_splits=SPLIT_NUM, random_state=0)
for split_index, (train_index_array, valid_index_array) in enumerate(cv_object.split(np.zeros((len(original_image_label_list), 1)), original_image_label_list), start=1):
print("Working on splitting fold {} ...".format(split_index))
submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "submission_{}.csv".format(split_index))
if os.path.isfile(submission_file_path):
print("The submission file already exists.")
continue
optimal_weights_file_path = os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "optimal_weights_{}.h5".format(split_index))
if os.path.isfile(optimal_weights_file_path):
print("The optimal weights file already exists.")
else:
print("Reorganizing dataset ...")
train_sample_num, valid_sample_num = reorganize_dataset(original_image_path_list, train_index_array, valid_index_array)
print("Startting with vanilla weights ...")
model.set_weights(vanilla_weights)
print("Performing the training procedure ...")
train_generator = load_dataset(ACTUAL_TRAIN_FOLDER_PATH, classes=unique_label_list, class_mode="categorical", shuffle=True, seed=SEED)
valid_generator = load_dataset(ACTUAL_VALID_FOLDER_PATH, classes=unique_label_list, class_mode="categorical", shuffle=True, seed=SEED)
earlystopping_callback = EarlyStopping(monitor="val_loss", patience=PATIENCE)
modelcheckpoint_callback = ModelCheckpoint(optimal_weights_file_path, monitor="val_loss", save_best_only=True, save_weights_only=True)
inspectlossaccuracy_callback = InspectLossAccuracy()
model.fit_generator(generator=train_generator,
samples_per_epoch=train_sample_num,
validation_data=valid_generator,
nb_val_samples=valid_sample_num,
callbacks=[earlystopping_callback, modelcheckpoint_callback, inspectlossaccuracy_callback],
nb_epoch=MAXIMUM_EPOCH_NUM, verbose=2)
assert os.path.isfile(optimal_weights_file_path)
model.load_weights(optimal_weights_file_path)
print("Performing the testing procedure ...")
for trial_index in np.arange(TRIAL_NUM) + 1:
print("Working on trial {}/{} ...".format(trial_index, TRIAL_NUM))
submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "{}_Trial_{}.csv".format(MODEL_NAME, trial_index))
if not os.path.isfile(submission_file_path):
print("Performing the testing procedure ...")
test_generator = load_dataset(TEST_FOLDER_PATH, shuffle=False, seed=trial_index)
prediction_array = model.predict_generator(generator=test_generator, val_samples=len(test_generator.filenames))
image_name_array = np.expand_dims([os.path.basename(image_path) for image_path in test_generator.filenames], axis=-1)
index_array_for_sorting = np.argsort(image_name_array, axis=0)
submission_file_content = pd.DataFrame(np.hstack((image_name_array, prediction_array))[index_array_for_sorting.flat])
submission_file_content.to_csv(submission_file_path, header=["image_name"] + unique_label_list, index=False)
print("Performing ensembling ...")
ensemble_predictions(SUBMISSION_FOLDER_PATH)
print("All done!")
if __name__ == "__main__":
run()
|
mit
|
friedrichromstedt/matplotlayers
|
matplotlayers/backends/PIL/figure_canvas.py
|
1
|
2080
|
# Copyright (c) 2010 Friedrich Romstedt <[email protected]>
# See also <www.friedrichromstedt.org> (if e-mail has changed)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Developed since: Jul 2008
"""Defines a Canvas for a matplotlib.figure.Figure instance to be rendered as
a PIL image."""
import PIL.Image
import matplotlib.backends.backend_agg as mpl_backend_agg
class FigureCanvasPIL:
"""A canvas for a matplotlib.figure.Figure instance to be rendered as a
PIL image."""
def __init__(self, figure):
"""FIGURE is a matplotlib.figure.Figure instance."""
self.figure = figure
def output_PIL(self, shape):
"""SHAPE is in pixels."""
dpi = self.figure.dpi
self.figure.set_size_inches(
float(shape[0]) / dpi,
float(shape[1]) / dpi)
agg_canvas = mpl_backend_agg.FigureCanvasAgg(self.figure)
agg_canvas.draw()
image_string = agg_canvas.tostring_rgb()
image = PIL.Image.fromstring("RGB", shape, image_string)
return image
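# Hedged usage sketch (not part of the original module); it assumes matplotlib
# and PIL are importable.  Note that recent Pillow releases spell
# Image.fromstring as Image.frombytes, so output_PIL may need adjusting there.
if __name__ == "__main__":
    import matplotlib.figure
    figure = matplotlib.figure.Figure()
    axes = figure.add_subplot(1, 1, 1)
    axes.plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasPIL(figure)
    image = canvas.output_PIL(shape=(640, 480))
    image.save("figure.png")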
|
mit
|
edhuckle/statsmodels
|
examples/python/quantile_regression.py
|
30
|
3970
|
## Quantile regression
#
# This example page shows how to use ``statsmodels``' ``QuantReg`` class to replicate parts of the analysis published in
#
# * Koenker, Roger and Kevin F. Hallock. "Quantile Regression". Journal of Economic Perspectives, Volume 15, Number 4, Fall 2001, Pages 143–156
#
# We are interested in the relationship between income and expenditures on food for a sample of working class Belgian households in 1857 (the Engel data).
#
# ## Setup
#
# We first need to load some modules and to retrieve the data. Conveniently, the Engel dataset is shipped with ``statsmodels``.
from __future__ import print_function
import patsy
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
from statsmodels.regression.quantile_regression import QuantReg
data = sm.datasets.engel.load_pandas().data
data.head()
# ## Least Absolute Deviation
#
# The LAD model is a special case of quantile regression where q=0.5
mod = smf.quantreg('foodexp ~ income', data)
res = mod.fit(q=.5)
print(res.summary())
# ## Visualizing the results
#
# We estimate the quantile regression model for many quantiles between .05 and .95, and compare the best-fit line from each of these models to the ordinary least squares (OLS) fit.
# ### Prepare data for plotting
#
# For convenience, we place the quantile regression results in a Pandas DataFrame, and the OLS results in a dictionary.
quantiles = np.arange(.05, .96, .1)
def fit_model(q):
res = mod.fit(q=q)
    return [q, res.params['Intercept'], res.params['income']] + res.conf_int().loc['income'].tolist()
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b','lb','ub'])
ols = smf.ols('foodexp ~ income', data).fit()
ols_ci = ols.conf_int().loc['income'].tolist()
ols = dict(a = ols.params['Intercept'],
b = ols.params['income'],
lb = ols_ci[0],
ub = ols_ci[1])
print(models)
print(ols)
# ### First plot
#
# This plot compares best fit lines for 10 quantile regression models to the least squares fit. As Koenker and Hallock (2001) point out, we see that:
#
# 1. Food expenditure increases with income
# 2. The *dispersion* of food expenditure increases with income
# 3. The least squares estimates fit low income observations quite poorly (i.e. the OLS line passes over most low income households)
x = np.arange(data.income.min(), data.income.max(), 50)
get_y = lambda a, b: a + b * x
for i in range(models.shape[0]):
y = get_y(models.a[i], models.b[i])
plt.plot(x, y, linestyle='dotted', color='grey')
y = get_y(ols['a'], ols['b'])
plt.plot(x, y, color='red', label='OLS')
plt.scatter(data.income, data.foodexp, alpha=.2)
plt.xlim((240, 3000))
plt.ylim((240, 2000))
plt.legend()
plt.xlabel('Income')
plt.ylabel('Food expenditure')
plt.show()
# ### Second plot
#
# The dotted black lines form the 95% point-wise confidence band around the 10 quantile regression estimates (solid black line). The red lines represent the OLS regression estimate along with its 95% confidence interval.
#
# In most cases, the quantile regression point estimates lie outside the OLS confidence interval, which suggests that the effect of income on food expenditure may not be constant across the distribution.
from matplotlib import rc
rc('text', usetex=True)
n = models.shape[0]
p1 = plt.plot(models.q, models.b, color='black', label='Quantile Reg.')
p2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')
p3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')
p4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')
p5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')
p6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')
plt.ylabel(r'$\beta_{\mbox{income}}$')
plt.xlabel('Quantiles of the conditional food expenditure distribution')
plt.legend()
plt.show()
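# Hedged follow-up sketch (not part of the original example): predicted median
# food expenditure for a few new income levels, computed directly from the
# parameters of the q=0.5 fit above.
new_income = np.array([500, 1000, 2000])
predicted_foodexp = res.params['Intercept'] + res.params['income'] * new_income
print(pd.DataFrame({'income': new_income, 'predicted foodexp': predicted_foodexp}))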
|
bsd-3-clause
|
AlexanderFabisch/scikit-learn
|
examples/linear_model/plot_ols_ridge_variance.py
|
387
|
2060
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise in the observations causes
large variance, as shown in the first plot: the slope of
the fitted line can change considerably from one noisy
draw of the observations to the next.
Ridge regression minimises a penalised version of the
least-squares objective. The penalty `shrinks` the values
of the regression coefficients towards zero.
Despite the few data points in each dimension, the slope
of the ridge prediction is much more stable and the
variance of the fitted line is greatly reduced, compared
to that of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
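# Hedged follow-up sketch (not part of the original example): the ridge penalty
# ``alpha`` shrinks the fitted coefficient towards zero, which is what
# stabilises the slope in the second panel.
for alpha in (0.0, 0.1, 1.0, 10.0):
    coef = linear_model.Ridge(alpha=alpha).fit(X_train, y_train).coef_
    print("alpha=%-4s coef=%s" % (alpha, coef))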
|
bsd-3-clause
|
chrsrds/scikit-learn
|
sklearn/model_selection/_search.py
|
1
|
60959
|
"""
The :mod:`sklearn.model_selection._search` module includes utilities to
fine-tune the parameters of an estimator.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Mapping, Sequence, Iterable
from functools import partial, reduce
from itertools import product
import numbers
import operator
import time
import warnings
import numpy as np
from scipy.stats import rankdata
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ._validation import _aggregate_score_dicts
from ..exceptions import NotFittedError
from joblib import Parallel, delayed
from ..utils import check_random_state
from ..utils.fixes import MaskedArray
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import _check_multimetric_scoring
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid:
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if not isinstance(param_grid, (Mapping, Iterable)):
raise TypeError('Parameter grid is not a dict or '
'a list ({!r})'.format(param_grid))
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
# check if all entries are dictionaries of lists
for grid in param_grid:
if not isinstance(grid, dict):
raise TypeError('Parameter grid is not a '
'dict ({!r})'.format(grid))
for key in grid:
if not isinstance(grid[key], Iterable):
raise TypeError('Parameter grid value is not iterable '
'(key={!r}, value={!r})'
.format(key, grid[key]))
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler:
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4,
... random_state=rng))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
n_iter = self.n_iter
if grid_size < n_iter:
warnings.warn(
'The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For exhaustive '
'searches, use GridSearchCV.'
% (grid_size, self.n_iter, grid_size), UserWarning)
n_iter = grid_size
for i in sample_without_replacement(grid_size, n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score=np.nan, **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None
The scorer callable object / function must have its signature as
``scorer(estimator, X, y)``.
If ``None`` the estimator's score method is used.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
Returns
-------
score : float
Score of this parameter setting on given test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
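    Examples
    --------
    A hedged usage sketch; the iris data, the index split and the accuracy
    scorer below are illustrative assumptions rather than part of the
    original documentation::

        import numpy as np
        from sklearn import datasets, svm
        from sklearn.metrics import get_scorer
        from sklearn.model_selection import fit_grid_point, train_test_split

        X, y = datasets.load_iris(return_X_y=True)
        train, test = train_test_split(np.arange(len(y)), random_state=0)
        score, params, n_test = fit_grid_point(
            X, y, svm.SVC(), {'C': 1.0}, train, test,
            scorer=get_scorer('accuracy'), verbose=0)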
"""
# NOTE we are not using the return value as the scorer by itself should be
# validated before. We use check_scoring only to reject multimetric scorer
check_scoring(estimator, scorer)
scores, n_samples_test = _fit_and_score(estimator, X, y,
scorer, train,
test, verbose, parameters,
fit_params=fit_params,
return_n_test_samples=True,
error_score=error_score)
return scores, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class BaseSearchCV(BaseEstimator, MetaEstimatorMixin, metaclass=ABCMeta):
"""Abstract base class for hyper parameter search with cross-validation.
"""
@abstractmethod
def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=True):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self, 'best_estimator_')
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
        This method, implemented in sub-classes, makes it possible to
        customize the scheduling of evaluations: GridSearchCV and
        RandomizedSearchCV schedule evaluations for their whole parameter
        search space at once, but other, more sequential approaches are also
        possible: for instance, it is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
Parameters
----------
evaluate_candidates : callable
This callback accepts a list of candidates, where each candidate is
a dict of parameter settings. It returns a dict of all results so
far, formatted like ``cv_results_``.
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" `cv`
instance (e.g., `GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str) or
# This will work for both dict / list (tuple)
self.refit not in scorers) and not callable(self.refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
**fit_and_score_kwargs)
for parameters, (train, test)
in product(candidate_params,
cv.split(X, y, groups)))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
self.best_estimator_ = clone(base_estimator).set_params(
**self.best_params_)
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
        # if one chooses to see train scores, "out" will contain train score info
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
        # test_score_dicts and train_score_dicts are lists of dictionaries and
# we make them into dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
if self.iid != 'deprecated':
warnings.warn(
"The parameter 'iid' is deprecated in 0.22 and will be "
"removed in 0.24.", DeprecationWarning
)
iid = self.iid
else:
iid = False
for scorer_name in scorers.keys():
# Computed the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=test_sample_counts if iid else None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
return results
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=False
If True, return the average score across folds, weighted by the number
of samples in each test set. In this case, the data is assumed to be
identically distributed across the folds, and the loss minimized is
the total loss per sample, and not the mean loss across the folds.
.. deprecated:: 0.22
Parameter ``iid`` is deprecated in 0.22 and will be removed in 0.24
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : boolean, string, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer. ``best_score_`` is not returned if refit is callable.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
return_train_score : boolean, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svc = svm.SVC()
>>> clf = GridSearchCV(svc, parameters)
>>> clf.fit(iris.data, iris.target)
GridSearchCV(estimator=SVC(),
param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
>>> sorted(clf.cv_results_.keys())
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split2_test_score', ...
'std_fit_time', 'std_score_time', 'std_test_score']
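    A callable ``refit`` (see the ``refit`` parameter above) receives
    ``cv_results_`` and must return an integer ``best_index_``.  A hedged
    sketch, where ``lowest_std_within_one_sd`` is a hypothetical helper and
    not part of scikit-learn::

        import numpy as np

        def lowest_std_within_one_sd(cv_results):
            means = np.asarray(cv_results['mean_test_score'])
            stds = np.asarray(cv_results['std_test_score'])
            best = means.argmax()
            within = np.flatnonzero(means >= means[best] - stds[best])
            return int(within[np.argmin(stds[within])])

        clf = GridSearchCV(svc, parameters, refit=lowest_std_within_one_sd)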
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.80 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.70 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.80 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.93 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
'std_test_score' : [0.01, 0.10, 0.05, 0.08],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
'std_train_score' : [0.01, 0.19, 0.00, 0.03],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
'std_score_time' : [0.00, 0.00, 0.00, 0.01],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
Notes
-----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
_required_parameters = ["estimator", "param_grid"]
def __init__(self, estimator, param_grid, scoring=None,
n_jobs=None, iid='deprecated', refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=False):
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
evaluate_candidates(ParameterGrid(self.param_grid))
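# A minimal illustrative sketch (not from the scikit-learn sources): because
# ``cv_results_`` is a dict of equal-length columns, as shown above, it can be
# loaded straight into a pandas DataFrame to inspect the candidates, e.g.
#
#     >>> import pandas as pd
#     >>> from sklearn.datasets import make_classification
#     >>> from sklearn.svm import SVC
#     >>> X, y = make_classification(random_state=0)
#     >>> search = GridSearchCV(SVC(), {'kernel': ['linear', 'rbf'],
#     ...                               'C': [1, 10]}).fit(X, y)
#     >>> pd.DataFrame(search.cv_results_).sort_values('rank_test_score').head()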
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try as values. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=False
If True, return the average score across folds, weighted by the number
of samples in each test set. In this case, the data is assumed to be
identically distributed across the folds, and the loss minimized is
the total loss per sample, and not the mean loss across the folds.
.. deprecated:: 0.22
Parameter ``iid`` is deprecated in 0.22 and will be removed in 0.24
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : boolean, string, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given the ``cv_results``.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``RandomizedSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer. When refit is callable, ``best_score_`` is disabled.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
return_train_score : boolean, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | split0_test_score |...|rank_test_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.80 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.90 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.70 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.80, 0.90, 0.70],
'split1_test_score' : [0.82, 0.50, 0.70],
'mean_test_score' : [0.81, 0.70, 0.70],
'std_test_score' : [0.01, 0.20, 0.00],
'rank_test_score' : [3, 1, 1],
'split0_train_score' : [0.80, 0.92, 0.70],
'split1_train_score' : [0.82, 0.55, 0.70],
'mean_train_score' : [0.81, 0.74, 0.70],
'std_train_score' : [0.01, 0.19, 0.00],
'mean_fit_time' : [0.73, 0.63, 0.43],
'std_fit_time' : [0.01, 0.02, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04],
'std_score_time' : [0.00, 0.00, 0.00],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import RandomizedSearchCV
>>> from scipy.stats import uniform
>>> iris = load_iris()
>>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,
... random_state=0)
>>> distributions = dict(C=uniform(loc=0, scale=4),
... penalty=['l2', 'l1'])
>>> clf = RandomizedSearchCV(logistic, distributions, random_state=0)
>>> search = clf.fit(iris.data, iris.target)
>>> search.best_params_
{'C': 2..., 'penalty': 'l1'}
"""
_required_parameters = ["estimator", "param_distributions"]
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
n_jobs=None, iid='deprecated', refit=True,
cv=None, verbose=0, pre_dispatch='2*n_jobs',
random_state=None, error_score=np.nan,
return_train_score=False):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
evaluate_candidates(ParameterSampler(
self.param_distributions, self.n_iter,
random_state=self.random_state))
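# A minimal sketch (not from the scikit-learn sources) of the callable
# ``refit`` option documented above: the callable receives ``cv_results_`` and
# must return an integer ``best_index_``. Here the fastest-fitting candidate
# within 0.01 of the best mean test score is refit:
#
#     >>> import numpy as np
#     >>> from scipy.stats import expon
#     >>> from sklearn.datasets import make_classification
#     >>> from sklearn.svm import SVC
#     >>> def fastest_near_best(cv_results):
#     ...     scores = np.asarray(cv_results['mean_test_score'])
#     ...     near_best = scores >= scores.max() - 0.01
#     ...     times = np.asarray(cv_results['mean_fit_time'])
#     ...     return int(np.flatnonzero(near_best)[np.argmin(times[near_best])])
#     >>> X, y = make_classification(random_state=0)
#     >>> search = RandomizedSearchCV(SVC(), {'C': expon(scale=10)}, n_iter=10,
#     ...                             refit=fastest_near_best, random_state=0)
#     >>> search = search.fit(X, y)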
|
bsd-3-clause
|
andrewrgarcia/3Dmapping-algorithm
|
mapto3d.py
|
1
|
2864
|
#mapto3d.py
#an algorithm for importing and mapping 2D images to 3D plots
#Andrew Garcia, 2016
import numpy as np
import matplotlib.pyplot as plt
import pickle
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.image as mpimg
# converting the image to a pickle; run once from the command line:
#img1 = mpimg.imread("O36062_640.png")
#pickle.dump( img1, open( "jcskull.p", "wb" ) )
# loading the pickled image
img1 = pickle.load( open( "jcskull.p", "rb" ),encoding='latin1')
# take a single colour channel as a luminance map
lum_img1 = img1[:,:,0]
# pixel dimensions (px x py)
px=244
py=244
# mapping algorithm: maps the 2D image to a 3D plot by transforming relative
# pixel luminance to depth
# one depth level per luminance threshold: darker pixels appear at more levels
thresholds = [0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10]
colors = ['magenta', 'yellow', 'black', 'cyan', 'pink',
          'magenta', 'blue', 'navy', 'black']
fig = plt.figure()
ax = plt.axes(projection='3d')
for depth, (threshold, color) in enumerate(zip(thresholds, colors), start=1):
    x = []
    y = []
    for i in range(py):
        for j in range(px):
            if lum_img1[i, j] < threshold:
                x.append(j)
                y.append(i)
    z = depth*np.ones(np.size(x))
    ax.plot(z, x, y, 'o', color=color)
plt.xlabel('')
plt.ylabel('')
plt.suptitle('Andrew R Garcia, 2016')
plt.show()
|
apache-2.0
|
vshtanko/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
259
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
bsd-3-clause
|
Achuth17/scikit-learn
|
sklearn/tests/test_grid_search.py
|
68
|
28778
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass multi-dimensional X and y arrays to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
bsd-3-clause
|
meduz/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
140
|
1521
|
"""
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20., 30.)
y = np.linspace(-20., 40.)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
mayblue9/scikit-learn
|
examples/svm/plot_svm_anova.py
|
250
|
2000
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute the cross-validation score
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
MazamaScience/marketFutures
|
development/ZachDingels/python/Commodity.py
|
1
|
1982
|
import pandas as pd
import os
from FuturesContract import FuturesContract
CONTRACT_MONTH_CODES = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
class Commodity(object):
"""docstring for Commodity"""
def __init__(self, code, pathToDataDir):
self.code = code
self.pathToDataDir = pathToDataDir
self.contracts = set()
self.load()
@property
def pathToDataDir(self):
return self._pathToDataDir
@pathToDataDir.setter
def pathToDataDir(self, path):
if os.path.exists(path):
self._pathToDataDir = path
self.pathToData = os.path.join(self.pathToDataDir, self.code + '.csv')
if not os.path.exists(self.pathToData):
pd.DataFrame().to_csv(self.pathToData)
else:
raise OSError('Could not find a directory with the path: %s.' % path )
def save(self):
    # Serialising individual FuturesContract objects is not implemented yet
    # (TODO); write out whatever is currently held in ``self.data`` so that
    # ``save`` does not fail on an undefined attribute.
    if not hasattr(self, 'data'):
        self.data = pd.DataFrame()
    self.data.to_csv(self.pathToData)
def load(self, pathToDataDir=None):
    if pathToDataDir:
        self._pathToDataDir = pathToDataDir
    # Keep the loaded frame on the instance so ``save`` can write it back.
    # A freshly created placeholder file may be empty, so fall back to an
    # empty DataFrame instead of raising.
    try:
        self.data = pd.read_csv(self.pathToData)
    except Exception:
        self.data = pd.DataFrame()
def update(self, startDate = None, endDate = None):
self.contracts = self.contracts.union(self.generateFutureContracts(startDate, endDate))
def generateFutureContracts(self, startDate, endDate, commodityCode = None):
if not commodityCode:
commodityCode = self.code
contractCodes = set()
years = [str(startDate.year + i) for i in range(endDate.year - startDate.year + 1)]
shortContractCodes = [commodityCode + str(month) for month in CONTRACT_MONTH_CODES]
for year in years:
for contractCode in shortContractCodes:
fullContractCode = contractCode + year
futureContract = FuturesContract(fullContractCode)
contractCodes.add(futureContract)
return contractCodes
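# A minimal usage sketch (assumes a local ``data`` directory exists and that
# ``FuturesContract`` only needs the full contract code, e.g. 'CZ2015'):
#
#     >>> import datetime as dt
#     >>> corn = Commodity('C', 'data')
#     >>> corn.update(dt.date(2015, 1, 1), dt.date(2016, 6, 30))
#     >>> corn.contracts   # one contract per month code and calendar year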
|
gpl-3.0
|
DataUSA/datausa-site
|
jhcovid19.py
|
2
|
1835
|
import pandas as pd
import os
import datetime as dt
today = dt.date.today()
start_day = dt.date(2020, 1, 22)
diff = today - start_day
days = diff.days
dates = pd.date_range("2020-01-22", periods=days+1,
freq="D").strftime('%m-%d-%Y')
dates = pd.Series(dates).astype(str)
data = []
for date in dates:
date_ = pd.to_datetime(date)
try:
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{}.csv".format(
date)
df = pd.read_csv(url, sep=",")
if date_ <= pd.to_datetime("2020-03-21"):
df = df[["Province/State", "Country/Region",
"Confirmed", "Deaths", "Recovered"]]
df = df.rename(columns={"Country/Region": "Geography"})
else:
df = df[["Province_State", "Country_Region",
"Confirmed", "Deaths", "Recovered"]]
df = df.rename(columns={"Country_Region": "Geography"})
df["Date"] = date
df["Date"] = df["Date"].str[6:10] + "/" + \
df["Date"].str[0:2] + "/" + df["Date"].str[3:5]
df = df[df["Geography"] != "US"]
df["Geography"] = df["Geography"].replace("Mainland China", "China")
df[["Confirmed", "Deaths", "Recovered"]] = df[[
"Confirmed", "Deaths", "Recovered"]].astype("Int64")
data.append(df)
except Exception as ex:
print(date, ex)
data = pd.concat(data, sort=False)
data = data.groupby(["Geography", "Date"]).sum().reset_index()
data["ID Geography"] = data["Geography"]
path = os.path.dirname(os.path.abspath("__file__")) + \
"/static/datacovid19.json"
previous = pd.read_json(path) if os.path.exists(path) else pd.DataFrame([])
if len(data) > len(previous):
data.to_json(path, orient="records")
|
agpl-3.0
|
wmvanvliet/psychic
|
psychic/tests/testplots.py
|
1
|
1628
|
import unittest, os.path
import matplotlib.pyplot as plt
import numpy as np
from .. import plots
from ..positions import POS_10_5
class TestPlots(unittest.TestCase):
def test_timeseries(self):
plt.clf()
plots.plot_timeseries(np.random.randn(1000, 10))
plt.savefig(os.path.join('out', 'timeseries.eps'))
def test_timeseries2(self):
plt.clf()
eeg = np.sin(2 * np.pi *
(np.linspace(0, 1, 100).reshape(-1, 1) * np.arange(6)))
plots.plot_timeseries(eeg, time=np.linspace(0, 1, 100), offset=2,
color='b', linestyle='--')
plt.savefig(os.path.join('out', 'timeseries2.eps'))
def test_scalpplot(self):
plt.clf()
sensors = ('Fp1 Fp2 AF3 AF4 F7 F3 Fz F4 F8 FC5 FC1 FC2 FC6 T7 C3 Cz C4 T8' +
' CP5 CP1 CP2 CP6 P7 P3 Pz P4 P8 PO3 PO4 O1 Oz O2').split()
activity = np.random.randn(len(sensors)) * 1e-3
activity[sensors.index('Fp1')] = 1.
activity[sensors.index('C3')] = -1.
activity[sensors.index('C4')] = 1.
plots.plot_scalp(activity, sensors, POS_10_5)
plt.savefig(os.path.join('out', 'topo.eps'))
sensors = ('Fp1 Fp2 F7 Fz F8 T7 C3 Cz C4 T8 P7 Pz P8 O1 Oz O2').split()
activity = np.random.randn(len(sensors)) * 1e-3
plots.plot_scalp(activity, sensors, POS_10_5)
def test_scalpgrid(self):
plt.clf()
sensors = ('Fp1 Fp2 AF3 AF4 F7 F3 Fz F4 F8 FC5 FC1 FC2 FC6 T7 C3 Cz C4 T8' +
' CP5 CP1 CP2 CP6 P7 P3 Pz P4 P8 PO3 PO4 O1 Oz O2').split()
plots.plot_scalpgrid(np.eye(32)[:20], sensors, titles=sensors[:20],
width=6, cmap=plt.cm.RdBu_r, clim=[-1, 1])
plt.savefig(os.path.join('out', 'scalpgrid.eps'))
|
bsd-3-clause
|
wzbozon/statsmodels
|
statsmodels/tsa/arima_process.py
|
26
|
30878
|
'''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker;
for AR the numbers are close but not identical to yule_walker;
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
burnin=0):
"""
Generate a random sample of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nsample : int
length of simulated time series
sigma : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations at the
beginning of the sample are dropped
Returns
-------
sample : array
sample of ARMA process given by ar, ma of length nsample
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
conventions used in signal processing (signal.lfilter) vs. the
conventions in statistics for ARMA processes, the AR parameters should
have the opposite sign of what you might expect. See the examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> ar = np.r_[1, -arparams] # add zero-lag and negate
>>> ma = np.r_[1, maparams] # add zero-lag
>>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
#TODO: unify with ArmaProcess method
eta = sigma * distrvs(nsample+burnin)
return signal.lfilter(ma, ar, eta)[burnin:]
def arma_acovf(ar, ma, nobs=10):
'''theoretical autocovariance function of ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acovf
Returns
-------
acovf : array
autocovariance of ARMA process given by ar, ma
See Also
--------
arma_acf
acovf
Notes
-----
Tries to do some crude numerical speed improvements for cases
with high persistence. However, this algorithm is slow if the process is
highly persistent and only a few autocovariances are desired.
'''
#increase length of impulse response for AR closer to 1
#maybe cheap/fast enough to always keep nobs for ir large
if np.abs(np.sum(ar)-1) > 0.9:
nobs_ir = max(1000, 2 * nobs) # no idea right now how large is needed
else:
nobs_ir = max(100, 2 * nobs) # no idea right now
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #better safe than sorry (?), I have no idea about the required precision
#only checked for AR(1)
while ir[-1] > 5*1e-5:
nobs_ir *= 10
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#again no idea where the speed break points are:
if nobs_ir > 50000 and nobs < 1001:
acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs])
for t in range(nobs)])
else:
acovf = np.correlate(ir, ir, 'full')[len(ir)-1:]
return acovf[:nobs]
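# Illustrative usage (a sketch added for clarity, not part of the original
# API documentation): for an AR(1) with coefficient 0.5 the theoretical
# autocovariances decay geometrically, gamma(k) = sigma**2 * 0.5**k / (1 - 0.25),
#   >>> acovf = arma_acovf([1, -0.5], [1.], nobs=5)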
def arma_acf(ar, ma, nobs=10):
'''theoretical autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acf
Returns
-------
acf : array
autocorrelation of ARMA process given by ar, ma
See Also
--------
arma_acovf
acf
acovf
'''
acovf = arma_acovf(ar, ma, nobs)
return acovf/acovf[0]
def arma_pacf(ar, ma, nobs=10):
'''partial autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned pacf
Returns
-------
pacf : array
partial autocorrelation of ARMA process given by ar, ma
Notes
-----
solves yule-walker equation for each lag order up to nobs lags
not tested/checked yet
'''
apacf = np.zeros(nobs)
acov = arma_acf(ar, ma, nobs=nobs+1)
apacf[0] = 1.
for k in range(2, nobs+1):
r = acov[:k]
apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
return apacf
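# Illustrative check (a sketch): for a pure AR(1) process the theoretical
# partial autocorrelations should vanish beyond lag 1,
#   >>> pacf = arma_pacf([1, -0.5], [1.], nobs=5)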
def arma_periodogram(ar, ma, worN=None, whole=0):
'''periodogram for ARMA process given by lag-polynomials ar and ma
Parameters
----------
ar : array_like
autoregressive lag-polynomial with leading 1 and lhs sign
ma : array_like
moving average lag-polynomial with leading 1
worN : {None, int}, optional
option for scipy.signal.freqz (read "w or N")
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : {0,1}, optional
options for scipy.signal.freqz
        Normally, frequencies are computed from 0 to pi (the upper half of
        the unit circle). If whole is non-zero, compute frequencies from
        0 to 2*pi.
Returns
-------
w : array
frequencies
sd : array
periodogram, spectral density
Notes
-----
    Normalization?
    This uses signal.freqz, which does not use the FFT. There is an
    FFT-based version somewhere.
'''
w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
        # this happens with unit root or seasonal unit root
print('Warning: nan in frequency response h, maybe a unit root')
return w, sd
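# Illustrative usage (a sketch): an AR(1) with positive autocorrelation has a
# spectral density concentrated at low frequencies,
#   >>> w, sd = arma_periodogram([1, -0.5], [1.])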
def arma_impulse_response(ar, ma, nobs=100):
'''get the impulse response function (MA representation) for ARMA process
Parameters
----------
ma : array_like, 1d
moving average lag polynomial
ar : array_like, 1d
auto regressive lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ir : array, 1d
impulse response function with nobs elements
Notes
-----
This is the same as finding the MA representation of an ARMA(p,q).
By reversing the role of ar and ma in the function arguments, the
    returned result is the AR representation of an ARMA(p,q), i.e.
ma_representation = arma_impulse_response(ar, ma, nobs=100)
ar_representation = arma_impulse_response(ma, ar, nobs=100)
fully tested against matlab
Examples
--------
AR(1)
>>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
this is the same as
>>> 0.8**np.arange(10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
MA(2)
>>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
array([ 1. , 0.5, 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
ARMA(1,2)
>>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
array([ 1. , 1.3 , 1.24 , 0.992 , 0.7936 ,
0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685])
'''
impulse = np.zeros(nobs)
impulse[0] = 1.
return signal.lfilter(ma, ar, impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
'''get the AR representation of an ARMA process
Parameters
----------
ar : array_like, 1d
auto regressive lag polynomial
ma : array_like, 1d
moving average lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ar : array, 1d
coefficients of AR lag polynomial with nobs elements
Notes
-----
This is just an alias for
``ar_representation = arma_impulse_response(ma, ar, nobs=100)``
fully tested against matlab
Examples
--------
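    Illustrative sketch (the ARMA(1,1) coefficients below are arbitrary):

    >>> ar_representation = arma2ar([1., -0.8], [1., 0.5], nobs=10)

    which is equivalent to ``arma_impulse_response([1., 0.5], [1., -0.8], nobs=10)``.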
'''
return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
'''find arma approximation to ar process
This finds the ARMA(p,q) coefficients that minimize the integrated
squared difference between the impulse_response functions
(MA representation) of the AR and the ARMA process. This does
currently not check whether the MA lagpolynomial of the ARMA
process is invertible, neither does it check the roots of the AR
lagpolynomial.
Parameters
----------
ar_des : array_like
coefficients of original AR lag polynomial, including lag zero
p, q : int
length of desired ARMA lag polynomials
n : int
        number of terms of the impulse_response function to include in the
        objective function for the approximation
    mse : string, 'ar'
        not used yet
Returns
-------
ar_app, ma_app : arrays
coefficients of the AR and MA lag polynomials of the approximation
res : tuple
result of optimize.leastsq
Notes
-----
Extension is possible if we want to match autocovariance instead
of impulse response function.
    TODO: convert the MA lag polynomial, ma_app, to be invertible by mirroring
    roots that are inside the unit circle to ones that are outside. How do we
    do this?
'''
#p,q = pq
def msear_err(arma, ar_des):
ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
ar_approx = arma_impulse_response(ma, ar, n)
## print(ar,ma)
## print(ar_des.shape, ar_approx.shape)
## print(ar_des)
## print(ar_approx)
return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum()
if start is None:
arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)]
else:
arma0 = start
    res = optimize.leastsq(msear_err, arma0, args=(ar_des,), maxfev=5000)
    #print(res)
    arma_app = np.atleast_1d(res[0])
    ar_app = np.r_[1, arma_app[:p-1]]
    ma_app = np.r_[1, arma_app[p-1:]]
return ar_app, ma_app, res
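# Illustrative usage (a sketch): approximate the AR representation of a
# fractionally integrated process (see lpol_fiar below) by a low-order ARMA,
#   >>> ar_des = lpol_fiar(0.4, n=20)
#   >>> ar_app, ma_app, res = ar2arma(ar_des, p=2, q=2, n=20)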
def lpol2index(ar):
'''remove zeros from lagpolynomial, squeezed representation with index
Parameters
----------
ar : array_like
coefficients of lag polynomial
Returns
-------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
'''
ar = np.asarray(ar)
index = np.nonzero(ar)[0]
coeffs = ar[index]
return coeffs, index
def index2lpol(coeffs, index):
'''expand coefficients to lag poly
Parameters
----------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
    Returns
    -------
    ar : array_like
        coefficients of lag polynomial
    '''
    n = max(index)
    ar = np.zeros(n + 1)
ar[index] = coeffs
return ar
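# Illustrative round trip (a sketch): lpol2index and index2lpol invert each
# other for a sparse lag polynomial,
#   >>> coeffs, index = lpol2index([1, 0, -0.5])   # -> [1, -0.5], [0, 2]
#   >>> lagpoly = index2lpol(coeffs, index)        # -> [1, 0, -0.5]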
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
'''MA representation of fractional integration
    .. math:: (1-L)^{-d}
    valid for ``|d| < 0.5`` or ``|d| < 1`` (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ma : array
coefficients of lag polynomial
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
return np.exp(gammaln(d+j) - gammaln(j+1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
'''AR representation of fractional integration
    .. math:: (1-L)^{d}
    valid for ``|d| < 0.5`` or ``|d| < 1`` (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ar : array
coefficients of lag polynomial
    Notes
    -----
    first coefficient is 1, negative signs except for first term,
    ar(L)*x_t
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
ar = - np.exp(gammaln(-d+j) - gammaln(j+1) - gammaln(-d))
ar[0] = 1
return ar
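# Illustrative check (a sketch): the AR and MA representations of fractional
# integration are inverses, so convolving the truncated lag polynomials is
# close to the identity polynomial [1, 0, 0, ...],
#   >>> approx_identity = np.convolve(lpol_fiar(0.4, n=50), lpol_fima(0.4, n=50))[:10]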
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
'''return coefficients for seasonal difference (1-L^s)
just a trivial convenience function
Parameters
----------
s : int
number of periods in season
Returns
-------
sdiff : list, length s+1
'''
return [1] + [0]*(s-1) + [-1]
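# Example (a sketch): a quarterly seasonal difference (1 - L^4),
#   >>> lpol_sdiff(4)   # -> [1, 0, 0, 0, -1]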
def deconvolve(num, den, n=None):
"""Deconvolves divisor out of signal, division of polynomials for n terms
calculates den^{-1} * num
Parameters
----------
num : array_like
signal or lag polynomial
    den : array_like
coefficients of lag polynomial (linear filter)
n : None or int
number of terms of quotient
Returns
-------
quot : array
quotient or filtered series
rem : array
remainder
Notes
-----
If num is a time series, then this applies the linear filter den^{-1}.
If both num and den are both lagpolynomials, then this calculates the
quotient polynomial for n terms and also returns the remainder.
This is copied from scipy.signal.signaltools and added n as optional
parameter.
"""
num = np.atleast_1d(num)
den = np.atleast_1d(den)
N = len(num)
D = len(den)
if D > N and n is None:
quot = []
rem = num
else:
if n is None:
n = N-D+1
input = np.zeros(n, float)
input[0] = 1
quot = signal.lfilter(num, den, input)
num_approx = signal.convolve(den, quot, mode='full')
if len(num) < len(num_approx): # 1d only ?
num = np.concatenate((num, np.zeros(len(num_approx)-len(num))))
rem = num - num_approx
return quot, rem
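# Illustrative usage (a sketch): dividing a lag-polynomial product by one of
# its factors recovers the other factor with a (numerically) zero remainder,
#   >>> quot, rem = deconvolve(np.convolve([1, -0.4], [1, 0.5]), [1, 0.5])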
class ArmaProcess(object):
"""
Represent an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
Parameters
----------
ar : array_like, 1d
Coefficient for autoregressive lag polynomial, including zero lag.
See the notes for some information about the sign.
ma : array_like, 1d
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample is
generated. See example.
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing (signal.lfilter) vs. the
    conventions used in statistics for ARMA processes, the AR parameters
    should have the opposite sign of what you might expect. See the
    examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
>>> arma_process = sm.tsa.ArmaProcess(ar, ma)
>>> arma_process.isstationary
True
>>> arma_process.isinvertible
True
>>> y = arma_process.generate_sample(250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
# maybe needs special handling for unit roots
def __init__(self, ar, ma, nobs=100):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=100):
"""
Create ArmaProcess instance from coefficients of the lag-polynomials
Parameters
----------
arcoefs : array-like
Coefficient for autoregressive lag polynomial, not including zero
lag. The sign is inverted to conform to the usual time series
representation of an ARMA process in statistics. See the class
docstring for more information.
macoefs : array-like
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample
is generated.
"""
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
@classmethod
def from_estimation(cls, model_results, nobs=None):
"""
Create ArmaProcess instance from ARMA estimation results
Parameters
----------
model_results : ARMAResults instance
A fitted model
nobs : int, optional
If None, nobs is taken from the results
"""
arcoefs = model_results.arparams
macoefs = model_results.maparams
nobs = nobs or model_results.nobs
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
except:
print('other is not a valid type')
raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
self.ma.tolist(),
self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
    arma2ma.__doc__ = arma_impulse_response.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
@property
def arroots(self):
"""
Roots of autoregressive lag-polynomial
"""
return self.arpoly.roots()
@property
def maroots(self):
"""
Roots of moving average lag-polynomial
"""
return self.mapoly.roots()
@property
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
        return bool(np.all(np.abs(self.arroots) > 1))
@property
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
        return bool(np.all(np.abs(self.maroots) > 1))
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
#TODO: variable returns like this?
        pr = self.maroots
insideroots = np.abs(pr) < 1
if insideroots.any():
pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
pnew = np.polynomial.Polynomial.fromroots(pr)
mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
burnin=0):
'''generate ARMA samples
Parameters
----------
nsample : int or tuple of ints
            If nsample is an integer, then this creates a 1d timeseries of
            length nsample. If nsample is a tuple, then the timeseries is
            along `axis`. All other axes have independent arma samples.
scale : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations
at the beginning of the sample are dropped
axis : int
See nsample.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not
tested yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(nsample) == 0:
nsample = [nsample]
if burnin:
            #handle burnin time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(nsample)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(nsample)
fslice = tuple([slice(None)]*np.ndim(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
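# Illustrative usage of ArmaProcess (a sketch; see the class docstring for the
# sign convention of the AR coefficients):
#   >>> process = ArmaProcess([1, -0.75, 0.25], [1, 0.65, 0.35], nobs=250)
#   >>> process.isstationary, process.isinvertible
#   >>> sample = process.generate_sample(250)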
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
if __name__ == '__main__':
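    # NOTE (added comment): the examples below are legacy sandbox code; they
    # reference ARIMAProcess and ARIMA, which are neither defined nor imported
    # in this module, so this __main__ block will not run as-is.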
# Simulate AR(1)
#--------------
# ar * y = ma * eta
ar = [1, -0.8]
ma = [1.0]
# generate AR data
eta = 0.1 * np.random.randn(1000)
yar1 = signal.lfilter(ar, ma, eta)
print("\nExample 0")
arest = ARIMAProcess(yar1)
rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat)
print(cov_x)
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
arest = ARIMAProcess(y1)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat1)
print(cov_x1)
err1 = arest.errfn(x=y1)
print(np.var(err1))
import statsmodels.api as sm
print(sm.regression.yule_walker(y1, order=2, inv=True))
print("\nExample 2")
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
arest2 = ARIMAProcess(y2)
rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
print(rhohat2)
print(cov_x2)
err2 = arest.errfn(x=y2)
print(np.var(err2))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
print(rhohat2a)
print(cov_x2a)
err2a = arest.errfn(x=y2)
print(np.var(err2a))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y2, order=2, inv=True))
print("\nExample 20")
nsample = 1000
ar = [1.0]#, -0.8, -0.4]
ma = [1.0, 0.5, 0.2]
y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
arest20 = ARIMAProcess(y3)
rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
print(rhohat3)
print(cov_x3)
err3 = arest20.errfn(x=y3)
print(np.var(err3))
print(np.sqrt(np.dot(err3,err3)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
print(rhohat3a)
print(cov_x3a)
err3a = arest20.errfn(x=y3)
print(np.var(err3a))
print(np.sqrt(np.dot(err3a,err3a)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y3, order=2, inv=True))
print("\nExample 02")
nsample = 1000
ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
ma = [1.0]#, 0.8, 0.4]
y4 = ARIMA.generate_sample(ar,ma,nsample)
arest02 = ARIMAProcess(y4)
rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
print(rhohat4)
print(cov_x4)
err4 = arest02.errfn(x=y4)
print(np.var(err4))
sige = np.sqrt(np.dot(err4,err4)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4)))
print(np.sqrt(np.diag(cov_x4)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
print(rhohat4a)
print(cov_x4a)
err4a = arest02.errfn(x=y4)
print(np.var(err4a))
sige = np.sqrt(np.dot(err4a,err4a)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4a)))
print(np.sqrt(np.diag(cov_x4a)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
import statsmodels.api as sm
print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
import matplotlib.pyplot as plt
plt.plot(arest2.forecast()[-100:])
#plt.show()
ar1, ar2 = ([1, -0.4], [1, 0.5])
ar2 = [1, -1]
lagpolyproduct = np.convolve(ar1, ar2)
print(deconvolve(lagpolyproduct, ar2, n=None))
print(signal.deconvolve(lagpolyproduct, ar2))
print(deconvolve(lagpolyproduct, ar2, n=10))
|
bsd-3-clause
|
hainm/scikit-learn
|
sklearn/cluster/tests/test_bicluster.py
|
226
|
9457
|
"""Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
|
bsd-3-clause
|
ak2166/ThinkStats2
|
code/populations.py
|
68
|
2609
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
|
gpl-3.0
|
navigator8972/vae_hwmotion
|
vae_assoc_ujichar_img_jnt.py
|
2
|
9404
|
import itertools
import cPickle as cp
import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import baxter_writer as bw
import dataset
import vae_assoc
import utils
np.random.seed(0)
tf.set_random_seed(0)
print 'Loading image data...'
img_data = utils.extract_images(fname='bin/img_data_extend.pkl', only_digits=False)
# img_data = utils.extract_images(fname='bin/img_data.pkl', only_digits=False)
# img_data_sets = dataset.construct_datasets(img_data)
print 'Loading joint motion data...'
fa_data, fa_mean, fa_std = utils.extract_jnt_fa_parms(fname='bin/jnt_ik_fa_data_extend.pkl', only_digits=False)
# fa_data, fa_mean, fa_std = utils.extract_jnt_fa_parms(fname='bin/jnt_fa_data.pkl', only_digits=False)
#normalize data
fa_data_normed = (fa_data - fa_mean) / fa_std
# fa_data_sets = dataset.construct_datasets(fa_data_normed)
print 'Constructing dataset...'
#put them together
aug_data = np.concatenate((img_data, fa_data_normed), axis=1)
data_sets = dataset.construct_datasets(aug_data, validation_ratio=.1, test_ratio=.1)
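# NOTE (added comment): each augmented sample concatenates the flattened
# 28x28 image (784 values) with the normalized function-approximator
# parameters (147 values, i.e. 21 bases for each of the 7 joints), matching
# the n_input sizes of the two sub-networks configured below.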
print 'Start training...'
batch_sizes = [64]
#n_z_array = [3, 5, 10, 20]
n_z_array = [4]
# assoc_lambda_array = [1, 3, 5, 10]
# assoc_lambda_array = [.1, .3, .5]
#assoc_lambda_array = [15, 40]
assoc_lambda_array = [8]
#weights_array = [[2, 1], [5, 1], [10, 1]]
# weights_array=[[30, 1], [50, 1]]
weights_array = [[50, 1]]
plt.style.use('ggplot')
for batch_size, n_z, assoc_lambda, weights in itertools.product(batch_sizes, n_z_array, assoc_lambda_array, weights_array):
img_network_architecture = \
dict(scope='image',
hidden_conv=False,
n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=n_z) # dimensionality of latent space
jnt_network_architecture = \
dict(scope='joint',
hidden_conv=False,
n_hidden_recog_1=200, # 1st layer encoder neurons
n_hidden_recog_2=200, # 2nd layer encoder neurons
n_hidden_gener_1=200, # 1st layer decoder neurons
n_hidden_gener_2=200, # 2nd layer decoder neurons
n_input=147, # 21 bases for each function approximator
n_z=n_z) # dimensionality of latent space
# img_network_architecture = \
# dict(scope='image',
# hidden_conv=True,
# n_hidden_recog_1=16, # 1st layer encoder neurons - depth for convolution layer
# n_hidden_recog_2=64, # 2nd layer encoder neurons - depth for convolution layer
# n_hidden_gener_1=64, # 1st layer decoder neurons - depth for convolution layer
# n_hidden_gener_2=16, # 2nd layer decoder neurons - depth for convolution layer
# n_input=28*28, # MNIST data input (img shape: 28*28)
# n_z=n_z) # dimensionality of latent space
#
# jnt_network_architecture = \
# dict(scope='joint',
# hidden_conv=False,
# n_hidden_recog_1=200, # 1st layer encoder neurons
# n_hidden_recog_2=200, # 2nd layer encoder neurons
# n_hidden_gener_1=200, # 1st layer decoder neurons
# n_hidden_gener_2=200, # 2nd layer decoder neurons
# n_input=147, # 21 bases for each function approximator
# n_z=n_z) # dimensionality of latent space
#create a new graph to ensure the resource is released after the training
#change to clear the default graph
tf.reset_default_graph()
vae_assoc_model, cost_hist = vae_assoc.train(data_sets, [img_network_architecture, jnt_network_architecture], binary=[True, False], weights=weights, assoc_lambda = assoc_lambda, learning_rate=0.001,
batch_size=batch_size, training_epochs=1000, display_step=5)
vae_assoc_model.save_model('output/model_batchsize{}_nz{}_lambda{}_weight{}.ckpt'.format(batch_size, n_z, assoc_lambda, weights[0]))
# time.sleep(10)
# #change to clear the default graph
# tf.reset_default_graph()
#
# vae_assoc_model = vae_assoc.AssocVariationalAutoEncoder([img_network_architecture, jnt_network_architecture],
# binary=[True, False],
# transfer_fct=tf.nn.relu,
# assoc_lambda=5,
# learning_rate=0.0001,
# batch_size=batch_size)
# vae_assoc_model.restore_model()
x_sample = data_sets.test.next_batch(batch_size)[0]
#extract image and fa
x_sample_seg = [x_sample[:, :784], x_sample[:, 784:]]
x_reconstruct = vae_assoc_model.reconstruct(x_sample_seg)
x_synthesis = vae_assoc_model.generate()
z_test = vae_assoc_model.transform(x_sample_seg)
#restore by scale back
x_sample_restore = x_sample_seg
x_sample_restore[1] = x_sample_restore[1] * fa_std + fa_mean
x_reconstruct_restore = x_reconstruct
x_reconstruct_restore[1] = x_reconstruct[1] * fa_std + fa_mean
x_synthesis_restore = x_synthesis
x_synthesis_restore[1] = x_synthesis[1] * fa_std + fa_mean
#prepare cartesian samples...
writer = bw.BaxterWriter()
# cart_sample = [np.array(writer.derive_cartesian_trajectory(np.reshape(s, (7, -1)).T)) for s in x_sample]
# cart_reconstruct = [np.array(writer.derive_cartesian_trajectory(np.reshape(s, (7, -1)).T)) for s in x_reconstruct]
cart_sample = [np.array(writer.derive_cartesian_trajectory_from_fa_parms(np.reshape(s, (7, -1)))) for s in x_sample_restore[1]]
cart_reconstruct = [np.array(writer.derive_cartesian_trajectory_from_fa_parms(np.reshape(s, (7, -1)))) for s in x_reconstruct_restore[1]]
cart_synthesis = [np.array(writer.derive_cartesian_trajectory_from_fa_parms(np.reshape(s, (7, -1)))) for s in x_synthesis_restore[1]]
plt.figure(figsize=(25, 16))
cart_z_sample = []
cart_z_reconstr = []
cart_z_synthesis = []
for i in range(5):
#evaluate the Cartesian movement
# plt.subplot(5, 2, 2*i + 1, projection='3d')
# plt.plot(xs=cart_sample[i][:, 0], ys=cart_sample[i][:, 1], zs=cart_sample[i][:, 2], linewidth=3.5)
plt.subplot(5, 6, 6*i + 1)
plt.plot(-cart_sample[i][:, 1], cart_sample[i][:, 0], linewidth=3.5)
plt.title("Test joint input")
plt.axis('equal')
cart_z_sample = np.concatenate([cart_z_sample, cart_sample[i][:, 2]])
# plt.subplot(5, 2, 2*i + 2, projection='3d')
# plt.plot(xs=cart_reconstruct[i][:, 0], ys=cart_reconstruct[i][:, 1], zs=cart_reconstruct[i][:, 2], linewidth=3.5)
plt.subplot(5, 6, 6*i + 2)
plt.plot(-cart_reconstruct[i][:, 1], cart_reconstruct[i][:, 0], linewidth=3.5)
plt.title("Reconstruction joint")
plt.axis('equal')
cart_z_reconstr = np.concatenate([cart_z_reconstr, cart_reconstruct[i][:, 2]])
plt.subplot(5, 6, 6*i + 3)
plt.imshow(x_sample_restore[0][i].reshape(28, 28), vmin=0, vmax=1)
plt.title("Test image input")
plt.colorbar()
plt.subplot(5, 6, 6*i + 4)
plt.imshow(x_reconstruct_restore[0][i].reshape(28, 28), vmin=0, vmax=1)
plt.title("Reconstruction image")
plt.colorbar()
plt.subplot(5, 6, 6*i + 5)
plt.imshow(x_synthesis_restore[0][i].reshape(28, 28), vmin=0, vmax=1)
plt.title("Synthesis image")
plt.colorbar()
plt.subplot(5, 6, 6*i + 6)
plt.plot(-cart_synthesis[i][:, 1], cart_synthesis[i][:, 0], linewidth=3.5)
plt.title("Synthesis joint")
plt.axis('equal')
cart_z_synthesis = np.concatenate([cart_z_synthesis, cart_synthesis[i][:, 2]])
plt.savefig('output/samples_batchsize{}_nz{}_lambda{}_weight{}.svg'.format(batch_size, n_z, assoc_lambda, weights[0]))
print 'Sample Z Coord:', np.mean(cart_z_sample), np.std(cart_z_sample)
print 'Reconstr Z Coord:', np.mean(cart_z_reconstr), np.std(cart_z_reconstr)
print 'Synthesis Z Coord:', np.mean(cart_z_synthesis), np.std(cart_z_synthesis)
print 'Test Latent variables:'
print 'images:'
print z_test[0][:5]
print 'joints:'
print z_test[1][:5]
# plt.tight_layout()
#see the latent representations layout
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
canvas = np.empty((28*ny, 28*nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
#check the first two dimension
z_mu = np.zeros((batch_size, n_z))
z_mu[0, 0] = xi
z_mu[0, 1] = yi
x_mean = vae_assoc_model.generate(z_mu)
canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0][0].reshape(28, 28)
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper")
plt.tight_layout()
plt.savefig('output/samples_2d_batchsize{}_nz{}_lambda{}_weight{}.svg'.format(batch_size, n_z, assoc_lambda, weights[0]))
#plt.show()
#save cost hist
cp.dump(cost_hist, open('output/cost_hist_batchsize{}_nz{}_lambda{}_weight{}.pkl'.format(batch_size, n_z, assoc_lambda, weights[0]), 'wb'))
|
gpl-3.0
|
mbway/Bayesian-Optimisation
|
turbo/plotting/plot_3D.py
|
1
|
4695
|
#!/usr/bin/env python3
'''
Utilities for plotting 3D graphs with plotly.js and matplotlib
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # for matplotlib 3D plots
# Plotly imports
try:
import plotly.offline as ply
import plotly.graph_objs as go
except ImportError:
# plotly not installed
ply = None
# local imports
from turbo.gui.utils import in_jupyter
from .config import Config
if in_jupyter() and ply is not None:
print('Setting up plotly for Jupyter')
# connected = whether to use CDN or load offline from the version stored in
# the python module.
ply.init_notebook_mode(connected=True)
import ipywidgets as widgets
def surface_3D(x, y, z, tooltips=None, axes_names=['x','y','z'], log_axes=(False,False,False)):
'''plot a 3D surface using plotly
Parameters should be of the form:
```
X = np.arange(...)
Y = np.arange(...)
X, Y = np.meshgrid(X, Y)
Z = f(X,Y)
```
Args:
tooltips: an array with the same length as the number of points,
containing a string to display beside them
log_axes: whether the `x,y,z` axes should have a logarithmic scale
(False => linear)
'''
data = [go.Surface(
x=x, y=y, z=z,
text=tooltips, colorscale='Viridis', opacity=1
)]
layout = go.Layout(
title='3D surface',
autosize=False,
width=900,
height=600,
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(
xaxis=dict(title=axes_names[0], type='log' if log_axes[0] else None),
yaxis=dict(title=axes_names[1], type='log' if log_axes[1] else None),
zaxis=dict(title=axes_names[2], type='log' if log_axes[2] else None),
)
)
fig = go.Figure(data=data, layout=layout)
# show_link is a link to export to the 'plotly cloud'
ply.iplot(fig, show_link=False)
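# Illustrative usage (a sketch, assuming plotly is installed and the code runs
# in a context where ply.iplot can render, e.g. a Jupyter notebook):
#   >>> import numpy as np
#   >>> X, Y = np.meshgrid(np.arange(-2, 2, 0.1), np.arange(-2, 2, 0.1))
#   >>> surface_3D(X, Y, np.sin(X) * np.cos(Y), axes_names=['x', 'y', 'sin(x)cos(y)'])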
def scatter_3D(x,y,z, interactive=False, color_by='z', markersize=2,
tooltips=None, axes_names=['x','y','z'],
log_axes=(False,False,False)):
'''
Args:
interactive: whether to display an interactive slider to choose how many
points to display
color_by: can be one of: 'z', 'age'.
'age' colors by the index (so the age of the sample)
tooltips: an array with the same length as the number of points,
containing a string to display beside them
log_axes: whether the `x,y,z` axes should have a logarithmic scale
(False => linear)
'''
# from https://plot.ly/python/sliders/
x,y,z = x.flatten(), y.flatten(), z.flatten()
num_samples = len(x)
if color_by == 'z':
color_by = z
scale = 'Viridis'
elif color_by == 'age':
color_by = list(reversed(range(num_samples)))
scale = 'Blues'
data = [go.Scatter3d(
x=x, y=y, z=z,
text=tooltips,
mode='markers',
opacity=0.9,
marker=dict(
size=markersize,
color=color_by,
colorscale=scale,
opacity=0.8
)
)]
layout = go.Layout(
title='3D Scatter Plot',
autosize=False,
width=900,
height=600,
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(
xaxis=dict(title=axes_names[0], type='log' if log_axes[0] else None),
yaxis=dict(title=axes_names[1], type='log' if log_axes[1] else None),
zaxis=dict(title=axes_names[2], type='log' if log_axes[2] else None),
)
)
fig = go.Figure(data=data, layout=layout)
if interactive:
def plot(val):
fig['data'][0].update(x=x[:val], y=y[:val], z=z[:val])
ply.iplot(fig, show_link=False)
# continuous_update = if the slider moves from 0 to 100, then call the
# update function with every value from 0 to 100
slider = widgets.IntSlider(value=num_samples, min=0, max=num_samples,
continuous_update=False, width='100%')
slider.description = 'first n points: '
widgets.interact(plot, val=slider)
else:
ply.iplot(fig, show_link=False)
def surface_3D_MPL(X, Y, Z):
'''plot a 3D surface using matplotlib
Parameters should be of the form:
```
X = np.arange(...)
Y = np.arange(...)
X, Y = np.meshgrid(X, Y)
Z = f(X,Y)
```
note: use `%matplotlib tk` to open 3D plots interactively
'''
fig = plt.figure(figsize=Config.fig_sizes['3D'])
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap='plasma', linewidth=0, antialiased=True)
fig.colorbar(surf)
plt.show()
|
gpl-3.0
|
jonandergomez/machine_learning_for_students
|
data_tools/RangeToBitmap.py
|
1
|
1557
|
"""
Author: Jon Ander Gomez Adrian ([email protected], http://personales.upv.es/jon)
Version: 1.0
Date: November 2016
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
import numpy
#from matplotlib import pyplot
import sys
import os
class RangeToBitmap:
def __init__(self, bounds = None, num_bits = None, h = None):
self.x_range = numpy.linspace(bounds[0], bounds[1], num_bits)
if h is None:
h = (bounds[1] - bounds[0]) / (2 * num_bits + 1)
self.h = h
try:
self.alpha = -0.5 / (h * h)
except:
print(bounds)
print(num_bits)
sys.exit(1)
def bitmap(self, value):
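        # Soft one-hot encoding (added comment): place an unnormalized Gaussian
        # of width h, centred at `value`, on the fixed grid self.x_range, then
        # normalize so the returned bitmap sums to one.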
y = value - self.x_range
y = numpy.exp(self.alpha * y * y)
return y / y.sum()
def __len__(self):
return len(self.x_range)
if __name__ == '__main__':
from bokeh.plotting import figure, output_file, show
rtb = RangeToBitmap(bounds = [numpy.log(1.0e-5), 0.0], num_bits = 10, h = None)
output_file('/tmp/rtb.html')
p = figure(title = 'Checking', x_range = [-16,3], x_axis_label = 'x', y_axis_label = 'bitmap value', width = 900)
i = 1
values = numpy.linspace(-20.0, 2.0, 10)
for x in values:
print(x)
y = rtb.bitmap(x)
print(y)
color = "#%02x%02x%02x" % (int((i * 255) / len(values)), 150, 150)
p.line(rtb.x_range, y, legend = '%.4f' % x, line_width = 2, line_color = color)
i += 1
show(p)
|
mit
|
lolotobg/FakeNewsChallenge
|
class-excercises/preprocess.py
|
1
|
1270
|
from nltk import WordNetLemmatizer, word_tokenize
import re
from sklearn import feature_extraction
from features import Feature
_wnl = WordNetLemmatizer()
class TokenizedLemmas(Feature):
def transform(self, X):
"""
Pre-process the headline and the body of the instances.
Get the list of lemmatized tokens in them.
Add them to the instance's dictionary, keeping the original text.
"""
for instance in X:
instance['body_lemmas'] = _get_tokenized_lemmas(instance['articleBody'])
instance['headline_lemmas'] = _get_tokenized_lemmas(instance['Headline'])
return X
def _remove_stopwords(tokens_list):
""" Removes stopwords from a list of tokens/"""
return [w for w in tokens_list if w not in feature_extraction.text.ENGLISH_STOP_WORDS]
def _normalize_word(word):
""" Lemmatize the given word."""
return _wnl.lemmatize(word).lower()
def _get_tokenized_lemmas(text):
""" Return a list of lemmatized tokens, found in the given text."""
return [_normalize_word(t) for t in word_tokenize(text)]
def _clean(text):
""" Cleans the text: Lowercasing, trimming, removing non-alphanumeric"""
return " ".join(re.findall(r'\w+', text, flags=re.UNICODE)).lower()
|
mit
|
shusenl/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
liangz0707/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
EggInTheShell/TodoCounting
|
generate_segementation_patches.py
|
1
|
4101
|
import numpy as np
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join, relpath
import glob, os
from scipy.ndimage.filters import gaussian_filter
import pickle
from settings import *
from data_utils import *
from sklearn.feature_extraction import image
# TODO: save each patch together with an explicit record of which image it came from
def patch_split_of(image, patch_size):
w = image.shape[1]//patch_size
h = image.shape[0]//patch_size
patch = np.zeros([w*h, patch_size, patch_size, image.shape[2]], dtype=np.uint8)
for i in range(w*h):
x = i%w
y = i//w
# print(x,y)
patch[i] = image[y*patch_size:(y+1)*patch_size, x*patch_size:(x+1)*patch_size]
return patch
def label_patch_split_of(image, patch_size):
w = image.shape[1]//patch_size
h = image.shape[0]//patch_size
patch = np.zeros([w*h, patch_size, patch_size], dtype=np.uint8)
for i in range(w*h):
x = i%w
y = i//w
# print(x,y)
patch[i] = image[y*patch_size:(y+1)*patch_size, x*patch_size:(x+1)*patch_size]
return patch
def generate_patches_from(image, label):
patch_size = 320
width = image.shape[1]
height = image.shape[0]
new_width = (width//patch_size + 1) * patch_size
new_height = (height//patch_size + 1) * patch_size
new_image = np.zeros([new_height, new_width, 3], dtype=np.uint8)
new_image[:height, :width] = image
new_label = np.zeros([new_height, new_width, 5], dtype=np.bool)
new_label[:height, :width] = label
image_patches = patch_split_of(new_image, patch_size)
label_patches = patch_split_of(new_label, patch_size).astype(np.bool)
# print(np.max(label_patches), np.min(label_patches))
return image_patches, label_patches
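# Illustrative sketch (not part of the original script): how patch_split_of
# carves an image into non-overlapping patches. The array shape below is an
# assumption chosen only for the example.
#
# >>> import numpy as np
# >>> dummy = np.zeros([640, 960, 3], dtype=np.uint8)   # height 640, width 960
# >>> patches = patch_split_of(dummy, 320)
# >>> patches.shape
# (6, 320, 320, 3)    # 2 rows x 3 columns of 320x320 patches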
def generate_segmentation_patches():
# Rewrite the paths below to match your environment
# train_folder = DATA_DIR + 'Train_blacked/'
# train_folder = 'data/TrainSmall2/Train_blacked/'
black_folder = DATA_DIR + 'Train_blacked/'
label_folder = DATA_DIR + 'label_images/'
# save_folder = 'H:/KaggleNOAASeaLions/classified_images/'
save_folder = DATA_DIR + 'patches_bool/'
if not os.path.isdir(save_folder):
os.makedirs(save_folder)
patch_list = glob.glob(save_folder+'*_traindata.pkl')
patch_id_list = []
print(patch_list)
for i in patch_list:
patch_id_list.append(int(os.path.basename(i)[:-14]))
print(patch_id_list)
# Load the list of label data files
label_path_list = glob.glob(label_folder+'*.pkl')
print(label_path_list)
# Process each image
for item in label_path_list:
# Load the image and its label
id = int(os.path.basename(item)[:-4])
print('processing id: ', id)
if id in black_id_list: continue
if id in patch_id_list:
print('already converted')
continue
labelpath = label_folder + str(id) + '.pkl'
with open(labelpath, mode='rb') as f:
label = pickle.load(f)
image = np.asarray(Image.open(black_folder + str(id) + '.png'))
image = image[:,:,:3]
# plt.imshow(image)
# plt.show()
print('shape', image.shape)
# Generate the patches
image_patch, label_patch = generate_patches_from(image, label)
print('number of patches: ', image_patch.shape[0])
# for i in range(image_patch.shape[0]):
# plt.subplot(1,2,1)
# plt.imshow(image_patch[i])
# plt.subplot(1,2,2)
# label_image = label_patch[i]
# label_image = np.mean(label_image, axis=2)
# plt.imshow(label_image)
# plt.show()
# Save
# print(image_patch.dtype, label_patch.dtype)
dict = {'image': image_patch, 'label': label_patch}
savepath = save_folder + str(id) + '_traindata.pkl'
with open(savepath, mode='wb') as f:
pickle.dump(dict, f)
print('saved: ', savepath)
if __name__=='__main__': generate_segmentation_patches()
|
mit
|
elliotchencv/cuda-convnet2
|
convdata.py
|
174
|
14675
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
|
apache-2.0
|
M4573R/BuildingMachineLearningSystemsWithPython
|
ch06/01_start.py
|
22
|
3955
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script trains multinomial Naive Bayes on the tweet corpus
# to find two different results:
# - How well can we distinguish positive from negative tweets?
# - How well can we detect whether a tweet contains sentiment at all?
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
def create_ngram_model():
tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
analyzer="word", binary=False)
clf = MultinomialNB()
pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
return pipeline
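# Illustrative sketch (not part of the original script): the pipeline returned
# by create_ngram_model() can be fit directly on raw strings, since the
# TfidfVectorizer handles tokenization. The toy tweets and labels below are
# made up for the example; the real data comes from load_sanders_data().
#
# >>> clf = create_ngram_model()
# >>> clf.fit(["what a great day", "this is awful"], [1, 0])
# >>> clf.predict(["great stuff"])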
def train_model(clf_factory, X, Y, name="NB ngram", plot=False):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
if plot:
plot_pr(pr_scores[median], name, "01", precisions[median],
recalls[median], label=name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(create_ngram_model, X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
train_model(create_ngram_model, X, Y, name="sent vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(create_ngram_model, X, Y, name="pos vs rest", plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(create_ngram_model, X, Y, name="neg vs rest", plot=True)
print("time spent:", time.time() - start_time)
|
mit
|
mikelum/pyspeckit
|
ah_bootstrap.py
|
31
|
36162
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and the options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Returns the imported distribution on success, or None otherwise.
"""
# Return True on success, False on failure but download is allowed, and
# otherwise raise SystemExit
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
# In Python 2.6 the distutils log does not log warnings, errors, etc. to
# stderr so we have to wrap it to ensure consistency at least in this
# module
import distutils
class log(object):
def __getattr__(self, attr):
return getattr(distutils.log, attr)
def warn(self, msg, *args):
self._log_to_stderr(distutils.log.WARN, msg, *args)
def error(self, msg, *args):
self._log_to_stderr(distutils.log.ERROR, msg, *args)
def fatal(self, msg, *args):
self._log_to_stderr(distutils.log.FATAL, msg, *args)
def log(self, level, msg, *args):
if level in (distutils.log.WARN, distutils.log.ERROR,
distutils.log.FATAL):
self._log_to_stderr(level, msg, *args)
else:
distutils.log.log(level, msg, *args)
def _log_to_stderr(self, level, msg, *args):
# This is the only truly 'public' way to get the current threshold
# of the log
current_threshold = distutils.log.set_threshold(distutils.log.WARN)
distutils.log.set_threshold(current_threshold)
if level >= current_threshold:
if args:
msg = msg % args
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
log = log()
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
|
mit
|
jstoxrocky/statsmodels
|
statsmodels/tools/grouputils.py
|
25
|
22518
|
# -*- coding: utf-8 -*-
"""Tools for working with groups
This provides several functions to work with groups and a Group class that
keeps track of the different representations and has methods to work more
easily with groups.
Author: Josef Perktold,
Author: Nathaniel Smith, recipe for sparse_dummies on scipy user mailing list
Created on Tue Nov 29 15:44:53 2011 : sparse_dummies
Created on Wed Nov 30 14:28:24 2011 : combine_indices
changes: add Group class
Notes
~~~~~
This reverses the class I used before, where the class was for the data and
the group was auxiliary. Here, it is only the group, no data is kept.
sparse_dummies needs checking for corner cases, e.g.
what if a category level has zero elements? This can happen with subset
selection even if the original groups were defined as arange.
Not all methods and options have been tried out yet after refactoring
need more efficient loop if groups are sorted -> see GroupSorted.group_iter
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
import numpy as np
import pandas as pd
from statsmodels.compat.numpy import npc_unique
import statsmodels.tools.data as data_util
from pandas.core.index import Index, MultiIndex
def combine_indices(groups, prefix='', sep='.', return_labels=False):
"""use np.unique to get integer group indices for product, intersection
"""
if isinstance(groups, tuple):
groups = np.column_stack(groups)
else:
groups = np.asarray(groups)
dt = groups.dtype
is2d = (groups.ndim == 2) # need to store
if is2d:
ncols = groups.shape[1]
if not groups.flags.c_contiguous:
groups = np.array(groups, order='C')
groups_ = groups.view([('', groups.dtype)] * groups.shape[1])
else:
groups_ = groups
uni, uni_idx, uni_inv = npc_unique(groups_, return_index=True,
return_inverse=True)
if is2d:
uni = uni.view(dt).reshape(-1, ncols)
# avoiding a view would be
# for t in uni.dtype.fields.values():
# assert (t[0] == dt)
#
# uni.dtype = dt
# uni.shape = (uni.size//ncols, ncols)
if return_labels:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
return uni_inv, uni_idx, uni, label
else:
return uni_inv, uni_idx, uni
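# Illustrative sketch (not part of the original module), assuming npc_unique
# behaves like np.unique: combine_indices maps arbitrary group labels to
# consecutive integer codes plus the array of unique labels.
#
# >>> combine_indices(np.array([10, 10, 20, 20, 10]))
# (array([0, 0, 1, 1, 0]), array([0, 2]), array([10, 20]))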
# written for and used in try_covariance_grouploop.py
def group_sums(x, group, use_bincount=True):
"""simple bincount version, again
group : array, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop
"""
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
elif x.ndim > 2 and use_bincount:
raise ValueError('not implemented yet')
if use_bincount:
# re-label groups or bincount takes too much memory
if np.max(group) > 2 * x.shape[0]:
group = pd.factorize(group)[0]
return np.array([np.bincount(group, weights=x[:, col])
for col in range(x.shape[1])])
else:
uniques = np.unique(group)
result = np.zeros([len(uniques)] + list(x.shape[1:]))
for ii, cat in enumerate(uniques):
result[ii] = x[group == cat].sum(0)
return result
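# Illustrative sketch (not part of the original module): with the bincount
# path, group_sums sums each column of x within each consecutive-integer group.
#
# >>> x = np.array([1., 2., 3., 4.])
# >>> group = np.array([0, 0, 1, 1])
# >>> group_sums(x, group)
# array([[ 3.,  7.]])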
def group_sums_dummy(x, group_dummy):
"""sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix
"""
if data_util._is_using_ndarray_type(group_dummy, None):
return np.dot(x.T, group_dummy)
else: # check for sparse
return x.T * group_dummy
def dummy_sparse(groups):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are
assumed to be defined as consecutive integers, i.e. range(n_groups)
where n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
indi = sparse.csr_matrix((data, groups, indptr))
return indi
class Group(object):
def __init__(self, group, name=''):
# self.group = np.asarray(group) # TODO: use checks in combine_indices
self.name = name
uni, uni_idx, uni_inv = combine_indices(group)
# TODO: rename these to something easier to remember
self.group_int, self.uni_idx, self.uni = uni, uni_idx, uni_inv
self.n_groups = len(self.uni)
# put this here so they can be overwritten before calling labels
self.separator = '.'
self.prefix = self.name
if self.prefix:
self.prefix = self.prefix + '='
# cache decorator
def counts(self):
return np.bincount(self.group_int)
# cache_decorator
def labels(self):
# is this only needed for product of groups (intersection)?
prefix = self.prefix
uni = self.uni
sep = self.separator
if uni.ndim > 1:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
else:
label = [prefix + '%s' % ii for ii in uni]
return label
def dummy(self, drop_idx=None, sparse=False, dtype=int):
"""
drop_idx is only available if sparse=False
drop_idx is supposed to index into uni
"""
uni = self.uni
if drop_idx is not None:
idx = lrange(len(uni))
del idx[drop_idx]
uni = uni[idx]
group = self.group
if not sparse:
return (group[:, None] == uni[None, :]).astype(dtype)
else:
return dummy_sparse(self.group_int)
def interaction(self, other):
if isinstance(other, self.__class__):
other = other.group
return self.__class__((self.group, other))
def group_sums(self, x, use_bincount=True):
return group_sums(x, self.group_int, use_bincount=use_bincount)
def group_demean(self, x, use_bincount=True):
nobs = float(len(x))
means_g = group_sums(x / nobs, self.group_int,
use_bincount=use_bincount)
x_demeaned = x - means_g[self.group_int] # check reverse_index?
return x_demeaned, means_g
class GroupSorted(Group):
def __init__(self, group, name=''):
super(GroupSorted, self).__init__(group, name=name)
idx = (np.nonzero(np.diff(group))[0]+1).tolist()
self.groupidx = lzip([0] + idx, idx + [len(group)])
def group_iter(self):
for low, upp in self.groupidx:
yield slice(low, upp)
def lag_indices(self, lag):
"""return the index array for lagged values
Warning: if lag is larger than the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we wouldn't know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet
"""
lag_idx = np.asarray(self.groupidx)[:, 1] - lag # asarray or already?
mask_ok = (lag <= lag_idx)
# still an observation that belongs to the same individual
return lag_idx[mask_ok]
def _is_hierarchical(x):
"""
Checks whether the first item of an array-like object is itself array-like.
If so, the input represents a MultiIndex and True is returned; otherwise False.
"""
item = x[0]
# is there a better way to do this?
if isinstance(item, (list, tuple, np.ndarray, pd.Series, pd.DataFrame)):
return True
else:
return False
def _make_hierarchical_index(index, names):
return MultiIndex.from_tuples(*[index], names=names)
def _make_generic_names(index):
n_names = len(index.names)
pad = str(len(str(n_names))) # number of digits
return [("group{0:0"+pad+"}").format(i) for i in range(n_names)]
class Grouping(object):
def __init__(self, index, names=None):
"""
index : index-like
Can be pandas MultiIndex or Index or array-like. If array-like
and is a MultipleIndex (more than one grouping variable),
groups are expected to be in each row. E.g., [('red', 1),
('red', 2), ('green', 1), ('green', 2)]
names : list or str, optional
The names to use for the groups. Should be a str if only
one grouping variable is used.
Notes
-----
If index is already a pandas Index then there is no copy.
"""
if isinstance(index, (Index, MultiIndex)):
if names is not None:
if hasattr(index, 'set_names'): # newer pandas
index.set_names(names, inplace=True)
else:
index.names = names
self.index = index
else: # array-like
if _is_hierarchical(index):
self.index = _make_hierarchical_index(index, names)
else:
self.index = Index(index, name=names)
if names is None:
names = _make_generic_names(self.index)
if hasattr(self.index, 'set_names'):
self.index.set_names(names, inplace=True)
else:
self.index.names = names
self.nobs = len(self.index)
self.nlevels = len(self.index.names)
self.slices = None
@property
def index_shape(self):
if hasattr(self.index, 'levshape'):
return self.index.levshape
else:
return self.index.shape
@property
def levels(self):
if hasattr(self.index, 'levels'):
return self.index.levels
else:
return pd.Categorical(self.index).levels
@property
def labels(self):
# this was index_int, but that's not a very good name...
if hasattr(self.index, 'labels'):
return self.index.labels
else: # pandas version issue here
# Compat code for the labels -> codes change in pandas 0.15
# FIXME: use .codes directly when we don't want to support
# pandas < 0.15
tmp = pd.Categorical(self.index)
try:
labl = tmp.codes
except AttributeError:
labl = tmp.labels  # old pandas
return labl[None]
@property
def group_names(self):
return self.index.names
def reindex(self, index=None, names=None):
"""
Resets the index in-place.
"""
# NOTE: this isn't of much use if the rest of the data doesn't change
# This needs to reset cache
if names is None:
names = self.group_names
# rebinding ``self`` would be a no-op; re-run __init__ so the reset
# actually takes effect on this instance
self.__init__(index, names)
def get_slices(self, level=0):
"""
Sets the slices attribute to be a list of indices of the sorted
groups for the first index level. I.e., self.slices[0] is the
index where each observation is in the first (sorted) group.
"""
# TODO: refactor this
groups = self.index.get_level_values(level).unique()
groups.sort()
if isinstance(self.index, MultiIndex):
self.slices = [self.index.get_loc_level(x, level=level)[0]
for x in groups]
else:
self.slices = [self.index.get_loc(x) for x in groups]
def count_categories(self, level=0):
"""
Sets the attribute counts to equal the bincount of the (integer-valued)
labels.
"""
# TODO: refactor this not to set an attribute. Why would we do this?
self.counts = np.bincount(self.labels[level])
def check_index(self, is_sorted=True, unique=True, index=None):
"""Sanity checks"""
if index is None:
index = self.index
if is_sorted:
test = pd.DataFrame(lrange(len(index)), index=index)
test_sorted = test.sort()
if not test.index.equals(test_sorted.index):
raise Exception('Data is not sorted')
if unique:
if len(index) != len(index.unique()):
raise Exception('Duplicate index entries')
def sort(self, data, index=None):
"""Applies a (potentially hierarchical) sort operation on a numpy array
or pandas series/dataframe based on the grouping index or a
user-supplied index. Returns an object of the same type as the
original data as well as the matching (sorted) Pandas index.
"""
if index is None:
index = self.index
if data_util._is_using_ndarray_type(data, None):
if data.ndim == 1:
out = pd.Series(data, index=index, copy=True)
out = out.sort_index()
else:
out = pd.DataFrame(data, index=index)
out = out.sort(inplace=False) # copies
return np.array(out), out.index
elif data_util._is_using_pandas(data, None):
out = data
out = out.reindex(index) # copies?
out = out.sort_index()
return out, out.index
else:
msg = 'data must be a Numpy array or a Pandas Series/DataFrame'
raise ValueError(msg)
def transform_dataframe(self, dataframe, function, level=0, **kwargs):
"""Apply function to each column, by group
Assumes that the dataframe already has a proper index"""
if dataframe.shape[0] != self.nobs:
raise Exception('dataframe does not have the same shape as index')
out = dataframe.groupby(level=level).apply(function, **kwargs)
if 1 in out.shape:
return np.ravel(out)
else:
return np.array(out)
def transform_array(self, array, function, level=0, **kwargs):
"""Apply function to each column, by group
"""
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
dataframe = pd.DataFrame(array, index=self.index)
return self.transform_dataframe(dataframe, function, level=level,
**kwargs)
def transform_slices(self, array, function, level=0, **kwargs):
"""Apply function to each group. Similar to transform_array but does
not coerce array to a DataFrame and back and only works on a 1D or 2D
numpy array. function is called function(group, group_idx, **kwargs).
"""
array = np.asarray(array)
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
# always reset because level is given. need to refactor this.
self.get_slices(level=level)
processed = []
for s in self.slices:
if array.ndim == 2:
subset = array[s, :]
elif array.ndim == 1:
subset = array[s]
processed.append(function(subset, s, **kwargs))
processed = np.array(processed)
return processed.reshape(-1, processed.shape[-1])
# TODO: this isn't general needs to be a PanelGrouping object
def dummies_time(self):
self.dummy_sparse(level=1)
return self._dummies
def dummies_groups(self, level=0):
self.dummy_sparse(level=level)
return self._dummies
def dummy_sparse(self, level=0):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
level : int, default 0
index level whose integer labels are expanded into the indicator.
Group levels are assumed to be defined as consecutive integers, i.e.
range(n_groups) where n_groups is the number of group levels. A group
level with no observations for it will still produce a column of zeros.
Returns
-------
None. The indicator is stored in the attribute ``_dummies``, a sparse
int8 matrix of shape (nobs, n_groups) with a 1 in the column of the
group level for each observation.
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
groups = self.labels[level]
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
self._dummies = sparse.csr_matrix((data, groups, indptr))
if __name__ == '__main__':
# ---------- examples combine_indices
from numpy.testing import assert_equal
np.random.seed(985367)
groups = np.random.randint(0, 2, size=(10, 2))
uv, ux, u, label = combine_indices(groups, return_labels=True)
uv, ux, u, label = combine_indices(groups, prefix='g1,g2=', sep=',',
return_labels=True)
group0 = np.array(['sector0', 'sector1'])[groups[:, 0]]
group1 = np.array(['region0', 'region1'])[groups[:, 1]]
uv, ux, u, label = combine_indices((group0, group1),
prefix='sector,region=',
sep=',',
return_labels=True)
uv, ux, u, label = combine_indices((group0, group1), prefix='', sep='.',
return_labels=True)
group_joint = np.array(label)[uv]
group_joint_expected = np.array(['sector1.region0', 'sector0.region1',
'sector0.region0', 'sector0.region1',
'sector1.region1', 'sector0.region0',
'sector1.region0', 'sector1.region0',
'sector0.region1', 'sector0.region0'],
dtype='|S15')
assert_equal(group_joint, group_joint_expected)
"""
>>> uv
array([2, 1, 0, 0, 1, 0, 2, 0, 1, 0])
>>> label
['sector0.region0', 'sector1.region0', 'sector1.region1']
>>> np.array(label)[uv]
array(['sector1.region1', 'sector1.region0', 'sector0.region0',
'sector0.region0', 'sector1.region0', 'sector0.region0',
'sector1.region1', 'sector0.region0', 'sector1.region0',
'sector0.region0'],
dtype='|S15')
>>> np.column_stack((group0, group1))
array([['sector1', 'region1'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector1', 'region1'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0']],
dtype='|S7')
"""
# ------------- examples sparse_dummies
from scipy import sparse
g = np.array([0, 0, 1, 2, 1, 1, 2, 0])
u = lrange(3)
indptr = np.arange(len(g)+1)
data = np.ones(len(g), dtype=np.int8)
a = sparse.csr_matrix((data, g, indptr))
print(a.todense())
print(np.all(a.todense() == (g[:, None] == np.arange(3)).astype(int)))
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
print('group sums')
print(x.T * a)
print(np.dot(x.T, g[:, None] == np.arange(3)))
print(np.array([np.bincount(g, weights=x[:, col]) for col in range(3)]))
for cat in u:
print(x[g == cat].sum(0))
for cat in u:
x[g == cat].sum(0)
cc = sparse.csr_matrix([[0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 0]])
# ------------- groupsums
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2), g,
use_bincount=False).T)
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 0], g))
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 1], g))
# ------------- examples class
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
mygroup = Group(g)
print(mygroup.group_int)
print(mygroup.group_sums(x))
print(mygroup.labels())
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/io/test_common.py
|
1
|
12852
|
"""
Tests for the pandas.io.common functionality
"""
from io import StringIO
import mmap
import os
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
class CustomFSPath:
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
try:
from pathlib import Path
path_types.append(Path)
except ImportError:
pass
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
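# Illustrative sketch (not part of the pandas test suite): any object that
# implements __fspath__, such as CustomFSPath above, resolves to its
# underlying string path via os.fspath (Python 3.6+). The helper name is
# hypothetical.
def _example_fspath_roundtrip():
    return os.fspath(CustomFSPath('foo/bar.csv'))  # -> 'foo/bar.csv'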
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities:
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
@td.skip_if_no('pathlib')
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = icom._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
@td.skip_if_no('py.path')
def test_stringify_path_localpath(self):
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = icom._stringify_path(p)
assert result == 'foo/bar.csv'
@pytest.mark.parametrize('extension,expected', [
('', None),
('.gz', 'gzip'),
('.bz2', 'bz2'),
('.zip', 'zip'),
('.xz', 'xz'),
])
@pytest.mark.parametrize('path_type', path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type('foo/bar.csv' + extension)
compression = icom._infer_compression(path, compression='infer')
assert compression == expected
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
filename)
assert filepath_or_buffer != filename
assert os.path.isabs(filepath_or_buffer)
assert os.path.expanduser(filename) == filepath_or_buffer
assert not should_close
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
input_buffer)
assert filepath_or_buffer == input_buffer
assert not should_close
def test_iterator(self):
reader = pd.read_csv(StringIO(self.data1), chunksize=1)
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
it = pd.read_csv(StringIO(self.data1), chunksize=1)
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext)
msg1 = (r"File (b')?.+does_not_exist\.{}'? does not exist"
.format(fn_ext))
msg2 = (r"\[Errno 2\] No such file or directory: '.+does_not_exist"
r"\.{}'").format(fn_ext)
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (r"\[Errno 2\] File .+does_not_exist\.{} does not exist:"
r" '.+does_not_exist\.{}'").format(fn_ext, fn_ext)
with pytest.raises(error_class, match=r"({}|{}|{}|{}|{})".format(
msg1, msg2, msg3, msg4, msg5)):
reader(path)
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_expands_user_home_dir(self, reader, module,
error_class, fn_ext, monkeypatch):
pytest.importorskip(module)
path = os.path.join('~', 'does_not_exist.' + fn_ext)
monkeypatch.setattr(icom, '_expand_user',
lambda x: os.path.join('foo', x))
msg1 = (r"File (b')?.+does_not_exist\.{}'? does not exist"
.format(fn_ext))
msg2 = (r"\[Errno 2\] No such file or directory:"
r" '.+does_not_exist\.{}'").format(fn_ext)
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (r"\[Errno 2\] File .+does_not_exist\.{} does not exist:"
r" '.+does_not_exist\.{}'").format(fn_ext, fn_ext)
with pytest.raises(error_class, match=r"({}|{}|{}|{}|{})".format(
msg1, msg2, msg3, msg4, msg5)):
reader(path)
def test_read_non_existant_read_table(self):
path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv')
msg1 = r"File b'.+does_not_exist\.csv' does not exist"
msg2 = (r"\[Errno 2\] File .+does_not_exist\.csv does not exist:"
r" '.+does_not_exist\.csv'")
with pytest.raises(FileNotFoundError, match=r"({}|{})".format(
msg1, msg2)):
with tm.assert_produces_warning(FutureWarning):
pd.read_table(path)
@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
(pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')),
(pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')),
(pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')),
(pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')),
(pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')),
])
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_read_fspath_all_read_table(self, datapath):
path = datapath('io', 'data', 'iris.csv')
mypath = CustomFSPath(path)
with tm.assert_produces_warning(FutureWarning):
result = pd.read_table(mypath)
with tm.assert_produces_warning(FutureWarning):
expected = pd.read_table(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('writer_name, writer_kwargs, module', [
('to_csv', {}, 'os'),
('to_excel', {'engine': 'xlwt'}, 'xlwt'),
('to_feather', {}, 'feather'),
('to_html', {}, 'os'),
('to_json', {}, 'os'),
('to_latex', {}, 'os'),
('to_msgpack', {}, 'os'),
('to_pickle', {}, 'os'),
('to_stata', {'time_stamp': pd.to_datetime('2019-01-01 00:00')}, 'os'),
])
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
with open(string, 'rb') as f:
expected = f.read()
writer(mypath, **writer_kwargs)
with open(fspath, 'rb') as f:
result = f.read()
assert result == expected
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip('tables')
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
with p1 as string, p2 as fspath:
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key='bar')
df.to_hdf(string, key='bar')
result = pd.read_hdf(fspath, key='bar')
expected = pd.read_hdf(string, key='bar')
tm.assert_frame_equal(result, expected)
@pytest.fixture
def mmap_file(datapath):
return datapath('io', 'data', 'test_mmap.csv')
class TestMMapWrapper:
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO('I am not a file')
non_file.fileno = lambda: -1
# the error raised is different on Windows
if is_platform_windows():
msg = "The parameter is incorrect"
err = OSError
else:
msg = "[Errno 22]"
err = mmap.error
with pytest.raises(err, match=msg):
icom.MMapWrapper(non_file)
target = open(mmap_file, 'r')
target.close()
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
icom.MMapWrapper(target)
def test_get_attr(self, mmap_file):
with open(mmap_file, 'r') as target:
wrapper = icom.MMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs
if not attr.startswith('__')]
attrs.append('__next__')
for attr in attrs:
assert hasattr(wrapper, attr)
assert not hasattr(wrapper, 'foo')
def test_next(self, mmap_file):
with open(mmap_file, 'r') as target:
wrapper = icom.MMapWrapper(target)
lines = target.readlines()
for line in lines:
next_line = next(wrapper)
assert next_line.strip() == line.strip()
with pytest.raises(StopIteration, match=r'^$'):
next(wrapper)
def test_unknown_engine(self):
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path)
with pytest.raises(ValueError, match='Unknown engine'):
pd.read_csv(path, engine='pyt')
|
bsd-3-clause
|
voxlol/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
42
|
73973
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1``
it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
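# Illustrative sketch (not part of scikit-learn): for dense X with
# fit_intercept=False and l1_ratio=1, the largest alpha on the grid equals
# max_j |x_j . y| / n_samples, the smallest penalty at which every
# coefficient is exactly zero. The helper name is hypothetical.
def _example_alpha_grid():
    rng = np.random.RandomState(0)
    X, y = rng.randn(20, 3), rng.randn(20)
    alphas = _alpha_grid(X, y, fit_intercept=False, n_alphas=5)
    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    # alphas[0] matches alpha_max up to floating-point rounding
    return alphas, alpha_max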
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be a significantly
faster way to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
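# Illustrative sketch (not part of scikit-learn): converting separate L1/L2
# strengths (a, b) from the "a * L1 + b * L2" parametrization described in
# the ElasticNet docstring into the (alpha, l1_ratio) pair the estimator
# expects. The helper name is hypothetical.
def _example_l1_l2_to_enet_params(a, b):
    alpha = a + b
    l1_ratio = a / (a + b)
    return alpha, l1_ratio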
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
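# Editor addition: an illustrative sketch (not part of scikit-learn) of the
# model selection performed above -- average the per-fold MSE paths and take
# the argmin over (l1_ratio, alpha). Array shapes mirror the code above;
# `_demo_best_alpha_selection` is a hypothetical name used only here.
def _demo_best_alpha_selection(mse_paths, l1_ratios, alphas):
    """mse_paths: shape (n_l1_ratio * n_folds, n_alphas); alphas: (n_l1_ratio, n_alphas)."""
    n_l1_ratio = len(l1_ratios)
    mse_paths = np.reshape(mse_paths, (n_l1_ratio, -1, np.shape(mse_paths)[-1]))
    mean_mse = mse_paths.mean(axis=1)  # average over folds
    i, j = np.unravel_index(np.argmin(mean_mse), mean_mse.shape)
    return l1_ratios[i], alphas[i][j]  # best l1_ratio, best alpha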
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
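# Editor addition: an illustrative usage sketch of the class above on synthetic
# data (not part of scikit-learn); all names below are local to the example.
def _demo_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(100)
    model = LassoCV(cv=3).fit(X, y)
    return model.alpha_, model.mse_path_.shape  # mse_path_ is (n_alphas, n_folds)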
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
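# Editor addition: an illustrative sketch of the (a, b) -> (alpha, l1_ratio)
# conversion described in the ElasticNetCV docstring above;
# `_penalties_to_enet_params` is a hypothetical helper, not a scikit-learn API.
def _penalties_to_enet_params(a, b):
    alpha = a + b
    l1_ratio = a / float(a + b)
    return alpha, l1_ratio  # e.g. a=1.0, b=3.0 -> alpha=4.0, l1_ratio=0.25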
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
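# Editor addition: an illustrative helper (not a scikit-learn API) computing
# the mixed norm ||W||_21 exactly as written in the docstring above, i.e. the
# sum of the Euclidean norms of the rows of W.
def _l21_norm(W):
    W = np.asarray(W)
    return np.sum(np.sqrt(np.sum(W ** 2, axis=1)))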
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
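# Editor addition: an illustrative usage sketch of MultiTaskLassoCV on a small
# synthetic multi-output problem (not part of scikit-learn); names are local
# to the example.
def _demo_multitask_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    W = np.zeros((8, 3))
    W[:2] = rng.randn(2, 3)  # only the first two features are informative
    Y = X.dot(W) + 0.05 * rng.randn(60, 3)
    model = MultiTaskLassoCV(cv=3).fit(X, Y)
    return model.alpha_, model.coef_.shape  # coef_ has shape (n_tasks, n_features)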
|
bsd-3-clause
|
bigartm/visartm
|
algo/arranging/main.py
|
1
|
3886
|
import numpy as np
from hamilton_path import HamiltonPath
import matplotlib.pyplot as plt
import time
class DefaultLogger:
def log(self, x):
print(x)
def generate_matrix(N):
path = np.random.choice(N, N, replace=False)
X = np.zeros((N, 3))
for i in range(N):
X[path[i]][0] = 10 * i
X[path[i]][1] = np.random.normal(scale=5)
X[path[i]][2] = np.random.normal(scale=5)
dist = np.zeros((N, N))
for i in range(N):
for j in range(N):
dist[i][j] = np.linalg.norm(X[i] - X[j])
best = 0
for i in range(N - 1):
best += dist[path[i]][path[i + 1]]
return dist, best
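# Editor addition: a minimal illustrative check (not part of the original
# module) -- generate_matrix returns an NxN distance matrix together with the
# weight of the generating path, which serves as a reference value for the
# annealing experiments below.
def _demo_generate_matrix():
    dist, best = generate_matrix(10)
    assert dist.shape == (10, 10)
    assert best > 0
    return best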
def N_profiler():
N_range = range(50, 500, 50)
time_chart = []
for N in N_range:
print(N)
dist, _ = generate_matrix(N)
hp = HamiltonPath(dist)
t = time.time()
hp.solve_annealing_c(5000000)
time_chart.append(5000000 / (time.time() - t))
plt.plot(N_range, time_chart)
plt.xlabel("N")
plt.ylabel("Steps/s")
plt.title("Time of annealing simulation")
plt.show()
def profiler(N=50):
steps_range = [int(1.5**i) for i in range(15, 41)]
time_chart = []
quality_chart = []
dist, best_q = generate_matrix(N)
for steps in steps_range:
print(steps)
t = time.time()
hp = HamiltonPath(dist)
hp.solve_annealing_c(steps)
time_chart.append(time.time() - t)
quality_chart.append(hp.path_weight())
k = steps_range[-1] / time_chart[-1]
plt.plot(steps_range, time_chart)
plt.plot([0, steps_range[-1]], [0, steps_range[-1] / k])
plt.xlabel("Steps")
plt.ylabel("Time, s")
plt.title("Time of annealing simulation")
plt.show()
print("%.0f steps per second" % k)
plt.plot(steps_range, quality_chart)
plt.plot([steps_range[0], steps_range[-1]], [best_q, best_q])
plt.xlabel("Steps")
plt.ylabel("Quality")
plt.title("Quality of annealing simulation")
plt.show()
def final_test():
dist, best_q = generate_matrix(50)
hp = HamiltonPath(dist, caller=DefaultLogger())
hp.solve_annealing()
print(best_q)
print(hp.path_weight())
def steps_optimizer():
steps_range = [int(1.4**i) for i in range(20, 52)]
N_range = range(1, 60)
best_steps_chart = []
for N in N_range:
print("N=%d" % N)
dist, best_q = generate_matrix(N)
hp = HamiltonPath(dist, caller=DefaultLogger())
print("Best: %f" % best_q)
hp.path = list(np.random.choice(N, N, replace=False))
q = []
best_steps = 1e9
for steps in steps_range:
hp.solve_annealing_c(
steps=steps,
Tmax=np.mean(dist) * 1e5,
Tmin=np.mean(dist) * 1e-5)
quality = hp.path_weight()
q.append(quality)
if (quality == best_q and steps < best_steps):
best_steps = steps
break
best_steps_chart.append(best_steps)
# plt.plot(steps_range, q)
# plt.plot([steps_range[0],steps_range[-1]], [best_q, best_q])
# plt.show()
plt.plot(N_range, best_steps_chart)
plt.plot(N_range, 1e4 * np.power(N_range, 2))
plt.plot()
plt.xlabel("N")
plt.ylabel("Optimal number of steps")
# hp.solve_simanneal()
# print(hp.path)
'''
hp.solve_annealing(steps=1000000, Tmax=np.mean(dist)*1e6,
Tmin=np.mean(dist)*1e-6)
plt.plot(hp.chart_iterations, hp.chart_weight)
plt.plot([0, hp.chart_iterations[-1]], [best_q, best_q])
plt.show()
print(hp.path_weight())
print(hp.path)
hp.solve_annealing_c(steps=10000000, Tmax=np.mean(dist)*1e6,
Tmin=np.mean(dist)*1e-6)
print(hp.path_weight())
print(hp.path)
'''
# steps_optimizer()
final_test()
# profiler()
# N_profiler()
|
bsd-3-clause
|
OwaJawa/kaggle-galaxies
|
try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup2.py
|
7
|
17746
|
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
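# Editor addition (hedged): minimal sanity checks restating the constraints in
# the comments above -- the chunk size should be a multiple of the batch size
# and VALIDATE_EVERY should divide NUM_CHUNKS.
assert CHUNK_SIZE % BATCH_SIZE == 0
assert NUM_CHUNKS % VALIDATE_EVERY == 0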
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup2.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup2.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes, processor_class=ra.LoadAndProcessPysexGen1CenteringRescaling)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes,
processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
#         # compute features for set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
|
bsd-3-clause
|
mdbartos/RIPS
|
temporary/basemap_base.py
|
1
|
10637
|
from mpl_toolkits.basemap import Basemap, cm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
from scipy import stats
import matplotlib as mpl
from shapely import geometry
#m.readshapefile('/home/akagi/Dropbox/NSF WSC AZ WEN Team Share/Electricity Demand/plots/util_proj/utility_rcp45', 'utility', drawbounds=False)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
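# Editor addition: an illustrative usage sketch of the generator above; it
# simply materialises a few RGBA colours (names are local to the example).
def _demo_gencolor():
    return [tuple(c) for c in gencolor(4, colormap='Set1')]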
def norm_cmap(values, cmap, normalize, cm, mn, mx):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
if (mn is None) and (mx is None):
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap, norm
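# Editor addition: an illustrative usage sketch of norm_cmap (not part of the
# original module); it maps raw values onto RGBA colours using the matplotlib
# objects the function expects.
def _demo_norm_cmap(values):
    from matplotlib.colors import Normalize
    from matplotlib import cm as mcm
    n_cmap, norm = norm_cmap(values, 'jet_r', Normalize, mcm, mn=None, mx=None)
    return [n_cmap.to_rgba(v) for v in values]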
def __pysal_choro(values, scheme, k=5):
""" Wrapper for choropleth schemes from PySAL for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme
        pysal.esda.mapclassify classification scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
k
number of classes (2 <= k <=9)
Returns
-------
values
Series with values replaced with class identifier if PySAL is available, otherwise the original values are used
"""
try:
from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
schemes = {}
schemes['equal_interval'] = Equal_Interval
schemes['quantiles'] = Quantiles
schemes['fisher_jenks'] = Fisher_Jenks
s0 = scheme
scheme = scheme.lower()
if scheme not in schemes:
scheme = 'quantiles'
print('Unrecognized scheme: ', s0)
print('Using Quantiles instead')
if k < 2 or k > 9:
print('Invalid k: ', k)
print('2<=k<=9, setting k=5 (default)')
k = 5
binning = schemes[scheme](values, k)
values = binning.yb
except ImportError:
print('PySAL not installed, setting map to default')
return values
def plot_linestring(ax, geom, color='black', linewidth=1, **kwargs):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs)
def plot_multilinestring(ax, geom, color='red', linewidth=1):
""" Can safely call with either LineString or MultiLineString geometry
"""
    if geom.type == 'LineString':
        plot_linestring(ax, geom, color=color, linewidth=linewidth)
    elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth)
def plot_polygon(ax, polymap, facecolor='red', edgecolor='black', alpha=1, linewidth=1):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(polymap)
poly = geometry.asPolygon(polymap)
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha))
ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor, linewidth=linewidth)
def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)
# colorset (from plot geodataframe
def plot_to_basemap(name, column=None, geom_type='LineString', colormap='rainbow', scheme=None, k=5, fixed_bins=False, man_bins=None, mn=None, mx=None, linewidth=1, alpha=1, orientation='horizontal', save=False, **kwargs):
from matplotlib.colors import Normalize
from matplotlib import cm as mcm
from matplotlib import colorbar
from matplotlib import ticker
from scipy import stats
fig = plt.figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# create polar stereographic Basemap instance.
m = Basemap(-125, 31, -102, 50,\
rsphere=6371200.,resolution='l',area_thresh=10000)
# draw coastlines, state and country boundaries, edge of map.
m.drawmapboundary(fill_color='lightsteelblue')
m.fillcontinents('0.85', lake_color='lightsteelblue')
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.readshapefile('/home/akagi/trans_impacts', 'transmission', drawbounds=False)
values = np.array([i[column] for i in getattr(m, '%s_info' % name)])
if man_bins:
valuebins = np.asarray(man_bins)
k = len(man_bins) - 1
else:
valuebins = stats.mstats.mquantiles(values, np.linspace(0,1,k+1))
if fixed_bins:
values = np.digitize(values, valuebins)
else:
# pysal not working with new colorbar definition
values = __pysal_choro(values, scheme, k=k)
cmap_ints = np.arange(len(man_bins) + 1)
cmap, norm = norm_cmap(values, colormap, Normalize, mcm, mn=cmap_ints[0], mx=cmap_ints[-1])
for geom, value in zip(getattr(m, name), values):
if geom_type == 'Polygon' or geom_type == 'MultiPolygon':
plot_polygon(ax, geom, facecolor=cmap.to_rgba(value), alpha=alpha, linewidth=linewidth, **kwargs)
elif geom_type == 'LineString' or geom_type == 'MultiLineString':
plot_linestring(m, geom, color=cmap.to_rgba(value), linewidth=linewidth, **kwargs)
# TODO: color point geometries
elif geom_type == 'Point':
plot_point(ax, geom, **kwargs)
dcmap = mpl.colors.ListedColormap([cmap.to_rgba(i) for i in cmap_ints][1:-1])
dcmap.set_under(cmap.to_rgba(0))
dcmap.set_over(cmap.to_rgba(cmap_ints[-1]))
# cax = fig.add_axes([0.92, 0.2, 0.02, 0.6])
cax = fig.add_axes([0.1, 0.1, 0.8, 0.02])
cb = colorbar.ColorbarBase(cax, cmap=dcmap, norm=norm, orientation=orientation, extend='both', spacing='uniform', extendfrac='auto')
cb.locator = ticker.LinearLocator(numticks=len(cmap_ints)-1)
cb.formatter = ticker.FixedFormatter(['%.2f' % (i) for i in valuebins.tolist()])
cb.update_ticks()
if save:
fig.savefig('%s.png' % column)
# plt.close(fig)
# cax = fig.add_axes([0.92, 0.2, 0.02, 0.6])
# cb = colorbar.ColorbarBase(cax, cmap=colormap, norm=norm, orientation='vertical')
# cb.locator = ticker.MaxNLocator(nbins=k)
# cb.formatter = ticker.FixedFormatter(valuebins.astype(str).tolist())
# cb.update_ticks()
man_bins = (-7.5, -5.0, -4.0, -3.0, -2.0, -1.0, 0.0)
#man_bins = (-9.17343664, -4.85376639, -4.4010333 , -3.98366336, -3.53319506, 0.0, 2.42685153)
#man_bins = (1.02, 1.04, 1.06, 1.08, 1.10, 1.12)
plot_to_basemap('transmission', 'pct_decrea', fixed_bins=True, man_bins=man_bins, colormap='jet_r', linewidth=0.1, save=True)
for c in ['pct_decrea', 'pct_decr_1', 'pct_decr_2', 'pct_decr_3', 'pct_decr_4', 'pct_decr_5', 'pct_decr_6', 'pct_decr_7', 'pct_decr_8', 'pct_decr_9']:
plot_to_basemap('transmission', c, fixed_bins=True, man_bins=man_bins, colormap='jet_r', linewidth=0.1, save=True)
# functions
# RUN
# plot_to_basemap(m, 'transmission', 'pct_decrea', scheme='Quantiles', colormap='jet_r', linewidth=0.1)
# man_bins = (-9.17343664, -4.85376639, -4.4010333 , -3.98366336, -3.53319506, 0.0, 2.42685153)
# plot_to_basemap(m, 'transmission', 'pct_decrea', fixed_bins=True, man_bins=man_bins, colormap='jet_r', linewidth=0.1)
# plot_to_basemap(m, 'utility', 'load_2050', geom_type='Polygon', fixed_bins=True, man_bins=man_bins, colormap='OrRd', linewidth=0.1)
# THINGS
name='transmission'
column='pct_decrea'
# scheme='Quantiles'
scheme=None
colormap='jet_r'
linewidth=0.1
mn=None
mx=None
k=5
fixed_bins=True
man_bins = (-7.5, -5.0, -4.0, -3.0, -2.0, -1.0, 0.0)
from matplotlib.colors import Normalize
from matplotlib import cm as mcm
from matplotlib import colorbar
from matplotlib import ticker
from scipy import stats
fig = plt.figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# create polar stereographic Basemap instance.
m = Basemap(-125, 31, -102, 50,\
rsphere=6371200.,resolution='l',area_thresh=10000)
# draw coastlines, state and country boundaries, edge of map.
m.drawmapboundary(fill_color='lightsteelblue')
m.fillcontinents('0.85', lake_color='lightsteelblue')
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.readshapefile('/home/akagi/trans_impacts', 'transmission', drawbounds=False)
values = np.array([i[column] for i in getattr(m, '%s_info' % name)])
#valuebins = stats.mstats.mquantiles(values, np.linspace(0,1,k+1))
if fixed_bins:
valuebins = np.asarray(man_bins)
k = len(man_bins) - 1
else:
valuebins = stats.mstats.mquantiles(values, np.linspace(0,1,k+1))
if fixed_bins:
values = np.digitize(values, valuebins)
else:
values = __pysal_choro(values, scheme, k=k)
cmap_ints = np.arange(len(man_bins) + 1)
cmap, norm = norm_cmap(values, colormap, Normalize, mcm, mn=cmap_ints[0], mx=cmap_ints[-1])
# try_discrete
dcmap = mpl.colors.ListedColormap([cmap.to_rgba(i) for i in cmap_ints][1:-1])
dcmap.set_under(cmap.to_rgba(0))
dcmap.set_over(cmap.to_rgba(cmap_ints[-1]))
cax = fig.add_axes([0.92, 0.2, 0.02, 0.6])
cb = colorbar.ColorbarBase(cax, cmap=dcmap, norm=norm, orientation='vertical', extend='both', spacing='uniform', extendfrac='auto')
#cb.locator = ticker.MaxNLocator(nbins=k)
cb.locator = ticker.LinearLocator(numticks=len(cmap_ints) - 1)
cb.formatter = ticker.FixedFormatter(valuebins.astype(str).tolist())
#cb.ax.set_yticklabels(valuebins[1].astype(str))
cb.update_ticks()
plt.show()
# 6 colors on colorbar
# digitize produces 7 bins
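# --- editor's hedged sketch (not part of the original script) ---
# With the 7 edges in man_bins, np.digitize assigns integer codes 0..7:
# 0 for values below the first edge, 1..6 for the six interior intervals,
# and 7 for values above the last edge.  The ListedColormap built above keeps
# only the six interior colours and routes codes 0 and 7 to set_under/set_over,
# which is why the colorbar shows six colours.
_demo_values = np.array([-10.0, -6.0, -4.5, -2.5, -0.5, 0.5])
print(np.digitize(_demo_values, np.asarray(man_bins)))  # -> [0 1 2 4 6 7]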
|
mit
|
ebolyen/q2d2
|
q2d2/__init__.py
|
2
|
16324
|
#!/usr/bin/env python
__version__ = "0.0.0-dev"
import random
import io
import itertools
from collections import defaultdict, namedtuple
import hashlib
import os
import shutil
import glob
import math
from functools import partial
import numpy as np
import pandas as pd
from IPython.html import widgets
from IPython.html.widgets import interactive, fixed, IntSlider
from IPython.display import display
from scipy.optimize import minimize_scalar
import skbio
from skbio.diversity.beta import pw_distances
import skbio.diversity.alpha
from skbio.stats.ordination import pcoa
from skbio.stats import subsample_counts
from skbio.util import safe_md5
from q2d2.wui import metadata_controls
data_type_to_study_filename = {'sample_metadata': '.sample-md',
'otu_metadata': '.otu-md',
'unrarefied_biom': '.biom',
'rarefied_biom': '.rarefied-biom',
'tree': '.tree'}
# this may make sense as a database schema. can we use an existing schema, e.g. Qiita?
WorkflowCategory = namedtuple('WorkflowCategory', ['title'])
Workflow = namedtuple('Workflow', ['title', 'inputs', 'outputs', 'category_id'])
workflow_categories = {
'no-biom': WorkflowCategory('No BIOM table'),
'raw-biom': WorkflowCategory('Raw (unnormalized) BIOM table'),
'normalized-biom': WorkflowCategory('Normalized BIOM table')
}
workflows = {
'rarefy-biom': Workflow(
'Rarefy BIOM table', {'unrarefied_biom'}, {'rarefied_biom'}, 'raw-biom'),
'biom-to-taxa-plots': Workflow(
'Taxonomy plots', {'unrarefied_biom', 'sample_metadata', 'otu_metadata'}, {}, 'raw-biom'),
'biom-to-adiv': Workflow(
'Alpha diversity', {'rarefied_biom', 'sample_metadata'}, {}, 'normalized-biom'),
'biom-to-bdiv': Workflow(
'Beta diversity', {'rarefied_biom', 'sample_metadata'}, {}, 'normalized-biom')
}
def get_data_info(study_id):
existing_data_types = get_existing_data_types(study_id)
data_info = []
for data_type in data_type_to_study_filename:
filename = data_type_to_study_filename[data_type]
exists = data_type in existing_data_types
data_info.append((data_type, filename, exists))
return data_info
def get_workflow_info(workflow_id):
workflow = workflows[workflow_id]
return {
'workflow-id': workflow_id,
'title': workflow.title,
'inputs': list(workflow.inputs),
'outputs': list(workflow.outputs),
'category-id': workflow.category_id
}
def get_workflow_category_info(category_id):
return {
'category-id': category_id,
'title': workflow_categories[category_id].title
}
def get_study_state(study_id):
existing_data_types = get_existing_data_types(study_id)
state = {
'study-id': study_id,
'workflow': {'exe': [], 'nexe': []},
'data': {}
}
for workflow_id in workflows:
workflow = workflows[workflow_id]
if workflow.inputs.issubset(existing_data_types):
state['workflow']['exe'].append(workflow_id)
else:
state['workflow']['nexe'].append(workflow_id)
for data_type in existing_data_types:
data_filepath = get_data_filepath(data_type, study_id)
with open(data_filepath, 'rb') as data_file:
# should we be using sha256 instead?
md5 = safe_md5(data_file).hexdigest()
state['data'][data_filepath] = md5
return state
def get_system_info():
# what other info goes here? dependencies?
return {'version': __version__}
def get_existing_data_types(study_id):
data_types = set()
for data_type in data_type_to_study_filename:
try:
get_data_filepath(data_type, study_id)
except FileNotFoundError:
pass
else:
data_types.add(data_type)
return data_types
def create_index(study_id, command):
markdown_s = get_index_markdown(study_id, command)
output_filepath = os.path.join(study_id, 'index.md')
open(output_filepath, 'w').write(markdown_s)
def get_data_filepath(data_type, study_id):
data_filepath = os.path.join(study_id, data_type_to_study_filename[data_type])
if not os.path.exists(data_filepath):
raise FileNotFoundError(data_filepath)
return data_filepath
def create_input_files(study_id, **kwargs):
for input_type, input_filepath in kwargs.items():
study_filepath = data_type_to_study_filename[input_type]
study_filepath = os.path.join(study_id, study_filepath)
shutil.copy(input_filepath, study_filepath)
def load_table(rarefied=False):
if rarefied:
table_path = data_type_to_study_filename['rarefied_biom']
else:
table_path = data_type_to_study_filename['unrarefied_biom']
result = pd.read_csv(table_path, sep='\t', skiprows=1, index_col=0)
result.index = result.index.astype(str)
if 'taxonomy' in result:
result.drop('taxonomy', axis=1, inplace=True)
return result
def store_table(table, rarefied=False):
if rarefied:
table_path = data_type_to_study_filename['rarefied_biom']
else:
table_path = data_type_to_study_filename['unrarefied_biom']
with open(table_path, 'w') as table_file:
table_file.write('# Constructed by [q2d2](github.com/gregcaporaso/q2d2)\n')
table.to_csv(table_file, index_label="#OTU ID", sep='\t')
load_rarefied_table = partial(load_table, rarefied=True)
store_rarefied_table = partial(store_table, rarefied=True)
def load_tree():
return skbio.TreeNode.read(data_type_to_study_filename['tree'], format='newick')
def load_sample_metadata():
return pd.read_csv(data_type_to_study_filename['sample_metadata'], sep='\t', index_col=0)
def load_otu_metadata():
return pd.read_csv(data_type_to_study_filename['otu_metadata'], sep='\t', names=['OTU ID', 'taxonomy'],
index_col=0, usecols=[0, 1], dtype=object)
def biom_to_adiv(metric, biom, tree=None):
metric_f = getattr(skbio.diversity.alpha, metric)
results = []
for e in biom.columns:
if metric == 'faith_pd':
results.append(metric_f(biom[e], biom.index, tree))
else:
results.append(metric_f(biom[e]))
return pd.Series(results, index=biom.columns)
def compute_alphas(otu_table, tree=None,
metrics=['chao1',
'faith_pd',
'observed_otus']):
alphas = {}
for metric in metrics:
alpha = biom_to_adiv(metric, otu_table, tree)
alphas[metric] = alpha
return alphas
def biom_to_dm(metric, biom, tree=None):
return pw_distances(metric=metric, counts=biom.T, ids=biom.columns)
def dm_to_pcoa(dm, sample_md, category):
title = "Samples colored by %s." % category
pcoa_results = pcoa(dm)
_ = pcoa_results.plot(df=sample_md,
column=category,
axis_labels=['PC 1', 'PC 2', 'PC 3'],
title=title,
s=35)
def table_summary(df):
print("Samples: ", len(df.columns))
print("Observations: ", len(df.index))
print("Sequence/sample count detail:")
print(df.sum().describe())
def get_workflow_template_filepath(workflow_id):
base_dir = os.path.abspath(os.path.split(__file__)[0])
return os.path.join(base_dir, "markdown", "%s.md" % workflow_id)
def get_workflow_filepath(workflow_id, study_id):
return os.path.join(study_id, "%s.md" % workflow_id)
def create_workflow(workflow_id, study_id):
workflow_template_filepath = get_workflow_template_filepath(workflow_id)
workflow_filepath = get_workflow_filepath(workflow_id, study_id)
if not os.path.exists(workflow_filepath):
shutil.copy(workflow_template_filepath, workflow_filepath)
return workflow_filepath
def delete_workflow(workflow_id, study_id):
workflow_filepath = get_workflow_filepath(workflow_id, study_id)
os.remove(workflow_filepath)
def get_index_markdown(study_id, command):
index_md_template = open(get_workflow_template_filepath('index')).read()
md_fps = glob.glob(os.path.join(study_id, '*.md'))
md_fps.sort()
toc = []
for md_fp in md_fps:
md_fn = os.path.split(md_fp)[1]
title = os.path.splitext(md_fn)[0].replace('-', ' ').title()
toc.append(' * [%s](%s)' % (title, md_fn))
toc = '\n'.join(toc)
result = index_md_template.format(toc, study_id, __version__, command)
return result
def _summarize_even_sampling_depth(even_sampling_depth, counts):
samples_retained = (counts >= even_sampling_depth)
num_samples_retained = samples_retained.sum()
num_sequences_retained = num_samples_retained * even_sampling_depth
return samples_retained, num_samples_retained, num_sequences_retained
def _get_depth_for_max_sequence_count(counts):
"""Find the even sampling depth that retains the most sequences."""
count_summary = counts.describe()
def f(d):
return -1 * _summarize_even_sampling_depth(d, counts)[2]
res = minimize_scalar(f,
bounds=(count_summary['min'], count_summary['max']),
method='bounded')
return int(np.floor(res.x))
def get_default_even_sampling_depth(biom):
counts = biom.sum()
return _get_depth_for_max_sequence_count(counts)
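# Editor's hedged sketch (not part of the original module): a hypothetical
# helper illustrating the trade-off that _get_depth_for_max_sequence_count
# optimises -- a larger even sampling depth keeps more sequences per retained
# sample but drops samples whose totals fall below that depth.
def _example_even_sampling_tradeoff():
    counts = pd.Series([100, 250, 400, 800, 1200])
    for depth in (100, 400, 800):
        _, n_samples, n_sequences = _summarize_even_sampling_depth(depth, counts)
        print(depth, n_samples, n_sequences)
    # depth 100 retains 5 samples / 500 sequences, 400 retains 3 / 1200,
    # 800 retains 2 / 1600; the bounded optimiser searches for the best depth.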
def explore_sampling_depth(biom):
import seaborn as sns
counts = biom.sum()
count_summary = counts.describe()
total_num_samples = len(counts)
total_num_sequences = counts.sum()
depth_for_max_sequence_count = _get_depth_for_max_sequence_count(counts)
sampling_depth_slider = IntSlider(min=count_summary['min'],
max=count_summary['max'],
step=10 ** (math.log(count_summary['max'], 10) - 2),
value=depth_for_max_sequence_count)
default_samples_retained, default_num_samples_retained, default_num_sequences_retained = \
_summarize_even_sampling_depth(depth_for_max_sequence_count, counts)
default_percent_samples_retained = default_num_samples_retained * 100 / total_num_samples
default_percent_sequences_retained = default_num_sequences_retained * 100 / total_num_sequences
label_s = "Depth {0}: {1:.2f}% of sequences and {2:.2f}% of samples retained."
def f(even_sampling_depth):
samples_retained, num_samples_retained, num_sequences_retained = \
_summarize_even_sampling_depth(even_sampling_depth, counts)
percent_samples_retained = num_samples_retained * 100 / total_num_samples
percent_sequences_retained = num_sequences_retained * 100 / total_num_sequences
ax = sns.distplot(counts)
ax.set_xlabel("Number of sequences per sample")
ax.set_ylabel("Frequency")
line_label = label_s.format(depth_for_max_sequence_count,
default_percent_sequences_retained,
default_percent_samples_retained)
ax.plot([depth_for_max_sequence_count, depth_for_max_sequence_count], ax.get_ylim(),
'k--', label=line_label)
line_label = label_s.format(even_sampling_depth,
percent_sequences_retained,
percent_samples_retained)
ax.plot([even_sampling_depth, even_sampling_depth], ax.get_ylim(),
'k-', label=line_label)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def reset_depth(_):
sampling_depth_slider.value = depth_for_max_sequence_count
reset = widgets.Button(icon='fa-refresh')
reset.on_click(reset_depth)
w = interactive(f, even_sampling_depth=sampling_depth_slider)
display(widgets.HBox(children=[w, reset]))
def rarify(biom, even_sampling_depth):
data = []
sample_ids = []
for e in biom.columns:
count_vector = biom[e]
if count_vector.sum() < even_sampling_depth:
continue
else:
sample_ids.append(e)
data.append(subsample_counts(count_vector.astype(int), even_sampling_depth))
return pd.DataFrame(np.asarray(data).T, index=biom.index, columns=sample_ids)
def filter_dm_and_map(dm, map_df):
ids_to_exclude = set(dm.ids) - set(map_df.index.values)
ids_to_keep = set(dm.ids) - ids_to_exclude
filtered_dm = dm.filter(ids_to_keep)
filtered_map = map_df.loc[ids_to_keep]
return filtered_dm, filtered_map
def get_within_between_distances(map_df, dm, col):
filtered_dm, filtered_map = filter_dm_and_map(dm, map_df)
groups = []
distances = []
map_dict = filtered_map[col].to_dict()
for id_1, id_2 in itertools.combinations(filtered_map.index.tolist(), 2):
row = []
if map_dict[id_1] == map_dict[id_2]:
groups.append('Within')
else:
groups.append('Between')
distances.append(filtered_dm[(id_1, id_2)])
groups = zip(groups, distances)
distances_df = pd.DataFrame(data=list(groups), columns=['Groups', 'Distance'])
return distances_df
def distance_histogram(dm, category, metadata, metric='Distance', order=['Within', 'Between']):
import seaborn as sns
within_bw_distances = get_within_between_distances(metadata, dm, category)
ax = sns.violinplot(x='Groups', y='Distance', data=within_bw_distances, order=order, orient='v')
ax.set_xlabel(category)
ax.set_ylabel(metric)
def interactive_distance_histograms(dm, sample_metadata):
def on_update(category, metadata, check_within, check_between):
order = []
if check_within:
order.append('Within')
if check_between:
order.append('Between')
distance_histogram(dm, category, metadata, order=order)
check_within = widgets.Checkbox(description='Show within category', value=True)
check_between = widgets.Checkbox(description='Show between category', value=True)
extras = widgets.VBox(children=[check_within, check_between])
return metadata_controls(sample_metadata, on_update, extras)
def distance_violinplots(dm, category, metadata, metric=None, order=['Within', 'Between']):
import seaborn as sns
within_bw_distances = get_within_between_distances(metadata, dm, category)
ax = sns.violinplot(x='Groups', y='Distance', data=within_bw_distances, order=order, orient='v')
ax.set_xlabel(category)
ax.set_ylabel(metric)
return ax
def interactive_distance_violinplots(dms, sample_metadata):
def on_update(category, metadata, metric, check_within, check_between):
order = []
if check_within:
order.append('Within')
if check_between:
order.append('Between')
dm = dms[metric]
distance_violinplots(dm, category, metadata, metric, order=order)
check_within = widgets.Checkbox(description='Show within category', value=True)
check_between = widgets.Checkbox(description='Show between category', value=True)
metric_but = widgets.Dropdown(options=list(dms.keys()), description='Metrics')
extras = widgets.VBox(children=[metric_but, check_within, check_between])
return metadata_controls(sample_metadata, on_update, extras)
def compute_distance_matrices(
otu_table,
tree=None,
metrics=['weighted_unifrac', 'unweighted_unifrac', 'braycurtis', 'jaccard']):
dms = {}
for metric in metrics:
dm = pw_distances(metric, otu_table.T.values, otu_table.columns.tolist(),
tree=tree, otu_ids=otu_table.index.tolist())
dms[metric] = dm
return dms
def interactive_plot_pcoa(metadata, dms):
def on_update(category, metadata, metric):
dm = dms[metric]
filtered_dm, _ = filter_dm_and_map(dm, metadata)
pc = pcoa(filtered_dm)
pc.plot(df=metadata,
column=category,
axis_labels=['PC 1', 'PC 2', 'PC 3'],
s=35).set_size_inches(12, 9)
metric_but = widgets.Dropdown(options=list(dms.keys()), description='Metrics')
extras = widgets.VBox(children=[metric_but])
return metadata_controls(metadata, on_update, extras)
|
bsd-3-clause
|
joostvanzwieten/nutils
|
examples/platewithhole-nurbs.py
|
1
|
5816
|
#!/usr/bin/env python3
#
# In this script we solve the same infinite plane strain problem as in
# :ref:`examples/platewithhole.py`, but instead of using FCM to create the hole
# we use a NURBS-based mapping. A detailed description of the testcase can be
# found in Hughes et al., `Isogeometric analysis: CAD, finite elements, NURBS,
# exact geometry and mesh refinement`, Computer Methods in Applied Mechanics
# and Engineering, Elsevier, 2005, 194, 4135-4195.
from nutils import mesh, function, solver, export, cli, testing
import numpy, treelog
def main(nrefine:int, traction:float, radius:float, poisson:float):
'''
Horizontally loaded linear elastic plate with IGA hole.
.. arguments::
nrefine [2]
Number of uniform refinements starting from 1x2 base mesh.
traction [.1]
Far field traction (relative to Young's modulus).
radius [.5]
Cut-out radius.
poisson [.3]
Poisson's ratio, nonnegative and strictly smaller than 1/2.
'''
# create the coarsest level parameter domain
domain, geom0 = mesh.rectilinear([1, 2])
bsplinebasis = domain.basis('spline', degree=2)
controlweights = numpy.ones(12)
controlweights[1:3] = .5 + .25 * numpy.sqrt(2)
weightfunc = bsplinebasis.dot(controlweights)
nurbsbasis = bsplinebasis * controlweights / weightfunc
# create geometry function
indices = [0,2], [1,2], [2,1], [2,0]
controlpoints = numpy.concatenate([
numpy.take([0, 2**.5-1, 1], indices) * radius,
numpy.take([0, .3, 1], indices) * (radius+1) / 2,
numpy.take([0, 1, 1], indices)])
geom = (nurbsbasis[:,numpy.newaxis] * controlpoints).sum(0)
radiuserr = domain.boundary['left'].integral((function.norm2(geom) - radius)**2 * function.J(geom0), degree=9).eval()**.5
treelog.info('hole radius exact up to L2 error {:.2e}'.format(radiuserr))
# refine domain
if nrefine:
domain = domain.refine(nrefine)
bsplinebasis = domain.basis('spline', degree=2)
controlweights = domain.project(weightfunc, onto=bsplinebasis, geometry=geom0, ischeme='gauss9')
nurbsbasis = bsplinebasis * controlweights / weightfunc
ns = function.Namespace()
ns.x = geom
ns.lmbda = 2 * poisson
ns.mu = 1 - poisson
ns.ubasis = nurbsbasis.vector(2)
ns.u_i = 'ubasis_ni ?lhs_n'
ns.X_i = 'x_i + u_i'
ns.strain_ij = '(d(u_i, x_j) + d(u_j, x_i)) / 2'
ns.stress_ij = 'lmbda strain_kk δ_ij + 2 mu strain_ij'
ns.r2 = 'x_k x_k'
ns.R2 = radius**2 / ns.r2
ns.k = (3-poisson) / (1+poisson) # plane stress parameter
ns.scale = traction * (1+poisson) / 2
ns.uexact_i = 'scale (x_i ((k + 1) (0.5 + R2) + (1 - R2) R2 (x_0^2 - 3 x_1^2) / r2) - 2 δ_i1 x_1 (1 + (k - 1 + R2) R2))'
ns.du_i = 'u_i - uexact_i'
sqr = domain.boundary['top,bottom'].integral('(u_i n_i)^2 J(x)' @ ns, degree=9)
cons = solver.optimize('lhs', sqr, droptol=1e-15)
sqr = domain.boundary['right'].integral('du_k du_k J(x)' @ ns, degree=20)
cons = solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)
# construct residual
res = domain.integral('d(ubasis_ni, x_j) stress_ij J(x)' @ ns, degree=9)
# solve system
lhs = solver.solve_linear('lhs', res, constrain=cons)
# visualize result
bezier = domain.sample('bezier', 9)
X, stressxx = bezier.eval(['X', 'stress_00'] @ ns, lhs=lhs)
export.triplot('stressxx.png', X, stressxx, tri=bezier.tri, hull=bezier.hull, clim=(numpy.nanmin(stressxx), numpy.nanmax(stressxx)))
# evaluate error
err = domain.integral('<du_k du_k, sum:ij(d(du_i, x_j)^2)>_n J(x)' @ ns, degree=9).eval(lhs=lhs)**.5
treelog.user('errors: L2={:.2e}, H1={:.2e}'.format(*err))
return err, cons, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# platewithhole-nurbs.py`.
if __name__ == '__main__':
cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(testing.TestCase):
@testing.requires('matplotlib')
def test0(self):
err, cons, lhs = main(nrefine=0, traction=.1, radius=.5, poisson=.3)
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00199, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .02269, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjYGBoQIIggMZXOKdmnHRe3vjh+cvGDAwA6w0LgQ==''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNpjYJh07qLhhnOTjb0vTDdmAAKVcy/1u85lGYforQDzFc6pGSedlzd+eP4ykA8AvkQRaA==''')
@testing.requires('matplotlib')
def test2(self):
err, cons, lhs = main(nrefine=2, traction=.1, radius=.5, poisson=.3)
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00009, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .00286, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjYGBoIAKCwCBXp3kuysDjnLXR+3NPjTzPqxrnAnHeeQvjk+dTjZ9d2GG85soJYwYGAPkhPtE=''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNpjYOg890mv85yM4axz0kYHz+00Yj6vZJxzPtWY+0KPMffFucaml+caMwBB5LlCvYhzCw0qzu0wPHyu
0sjlPIsx14VoY/6LvcaxlxYZz7myCKzO+dwWPZdzBwzqz20z/Hguxmj2+TtGHRdsjHdfbDB2v7zUeMXV
pWB1VucC9B3OORmuOCdhZHR+ktGu87eNbC6oGstfLDA+eWm1seG19WB1Buf+6ruce2p469wco9Dzb4wm
n2c23nZe3djqQqpx88XNxrOv7gOr0zwXZeBxztro/bmnRp7nVY1zgTjvvIXxSaBfnl3YYbzmygmgOgDU
Imlr''')
|
mit
|
trmznt/fatools
|
fatools/lib/analytics/analyticalset.py
|
2
|
9336
|
from collections import defaultdict
from fatools.lib.analytics.dataframes import AlleleDataFrame
from pandas import pivot_table
from pprint import pprint
class AnalyticalSet(object):
""" AnalyticalSet
_allele_df: dataframe of (marker_id, sample_id, value, size, height, assay_id)
_marker_df:
"""
def __init__(self, sample_set, params, marker_ids, dbh):
assert sample_set and params and dbh
self._sample_set = sample_set
self._params = params
if not marker_ids:
marker_ids = params.get_marker_ids(dbh)
self._marker_ids = marker_ids
self._allele_df = AlleleDataFrame(dbh,
sample_ids = self.sample_ids,
marker_ids = marker_ids,
params = params
)
# placeholder
self._marker_df = None
self._filtered_sample_ids = None
self._filtered_marker_ids = None
self._sample_marker = None
self._sample_genotyped_dist = None
self._marker_genotyped_dist = None
@property
def label(self):
return self._sample_set.label
@property
def sample_set(self):
return self._sample_set
@property
def marker_ids(self):
return self._allele_df.marker_ids
@property
def sample_ids(self):
return self._sample_set.sample_ids
@property
def allele_df(self):
return self._allele_df
@property
def colour(self):
return self._sample_set.colour
@property
def N(self):
return self._sample_set.N
@property
def marker_df(self):
""" return a dataframe of:
sample_id marker_id1 marker_id2 marker_id3
1 2 1 0
where value = number of alleles in the marker for this particular sample
"""
if self._marker_df is None:
self._marker_df = pivot_table( self._allele_df.df,
index = 'sample_id', columns = 'marker_id', values='value', aggfunc = len )
return self._marker_df
@property
def sample_marker(self):
if self._sample_marker is None:
self._sample_marker = {}
for (idx, marker_id, sample_id, value, size, height, assay_id, allele_id, ratio, rank) in self.allele_df.df.itertuples():
try:
self._sample_marker[sample_id].add( marker_id )
except KeyError:
self._sample_marker[sample_id] = { marker_id } # create initial set
return self._sample_marker
def get_filtered_sample_ids(self):
""" get sample_ids that passed sample quality assessment """
if self._filtered_sample_ids is None:
self._filtered_sample_ids, self._sample_genotyped_dist = self.assess_sample_quality()
# further filtering for sample: none, strict/low-complexity or unique haplotype
# N / S / U
sample_filtering = self._params.sample_filtering.upper()
if sample_filtering == 'M':
# monoclonal samples
sample_mult = self.allele_df.sample_multiplicity
monoclonal_ids = set( int(x) for x in
sample_mult[sample_mult == 1].index.values )
self._filtered_sample_ids = self._filtered_sample_ids & monoclonal_ids
elif sample_filtering == 'S':
# strict / low-complexity samples
# get sample_ids from strict / low_complexity samples
locus_mult = self.allele_df.locus_multiplicity
locus_mult_dist = locus_mult.sum(1)
low_complex_ids = set( int(x) for x in
locus_mult_dist[locus_mult_dist <= 1].index.values )
self._filtered_sample_ids = self._filtered_sample_ids & low_complex_ids
elif sample_filtering == 'U':
# only unique haplotype samples
# get index from unique haplotypes
unique_ids = set( int(x) for x in self.allele_df.unique_mlgt.index.values )
self._filtered_sample_ids = self._filtered_sample_ids & unique_ids
return self._filtered_sample_ids
def get_filtered_marker_ids(self):
""" get marker_ids that passed marker quality assessment """
if self._filtered_marker_ids is None:
self._filtered_marker_ids, self._marker_genotyped_dist = self.assess_marker_quality()
return self._filtered_marker_ids
def get_sample_genotyped_distribution(self):
if self._sample_genotyped_dist is None:
self.get_filtered_sample_ids()
return self._sample_genotyped_dist
def get_marker_genotyped_distribution(self):
if self._marker_genotyped_dist is None:
self.get_filtered_marker_ids()
return self._marker_genotyped_dist
def assess_sample_quality(self, sample_qual_threshold = -1):
""" assess sample based on successful genotyped markers
param: sample_qual_threshold
"""
sample_quality = [ (s_id, len(m)) for s_id, m in self.sample_marker.items() ]
genotyped_dist = [ x[1] for x in sample_quality ]
#n = max(genotyped_dist)
n = len(self.marker_ids)
if sample_qual_threshold < 0:
sample_qual_threshold = self._params.sample_qual_threshold
threshold = n * sample_qual_threshold
passed_sample_ids = set([ int(x[0]) for x in sample_quality if x[1] >= threshold ])
#failed_samples = len(sample_quality) - len(passed_sample_ids)
return (passed_sample_ids, genotyped_dist)
def assess_marker_quality(self, marker_qual_threshold = -1):
""" assess marker based on successful genotyped samples, which must be done on
all samples!!
param: marker_qual_threshold
"""
marker_genotyped = []
for marker_id in self.marker_ids:
# check of any x > 0 for marker_df[marker_id] = [ 2 1 0 0 0 ]
genotyped = 0
for m in self.marker_df.get(marker_id, [0]):
if m > 0:
genotyped += 1
marker_genotyped.append( (marker_id, genotyped) )
if marker_qual_threshold < 0:
marker_qual_threshold = self._params.marker_qual_threshold
threshold = len(self.sample_ids) * marker_qual_threshold
passed_marker_ids = set([ x[0] for x in marker_genotyped if x[1] >= threshold ])
return (passed_marker_ids, marker_genotyped)
def get_filtered_analytical_set(self, sample_ids=None, marker_ids=None):
if not (sample_ids or marker_ids):
return None
raise NotImplementedError()
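# Editor's hedged sketch (not part of the original module): the pivot used by
# AnalyticalSet.marker_df counts alleles per (sample, marker).  The toy frame
# below is hypothetical but mirrors the relevant columns of _allele_df.df.
def _example_marker_pivot():
    import pandas as pd
    df = pd.DataFrame({'sample_id': [1, 1, 1, 2],
                       'marker_id': ['mA', 'mA', 'mB', 'mA'],
                       'value':     [200, 204, 150, 198]})
    # roughly:  marker_id   mA   mB
    #           sample_id
    #           1            2    1
    #           2            1  NaN
    return pivot_table(df, index='sample_id', columns='marker_id',
                       values='value', aggfunc=len)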
class AnalyticalSetContainer(list):
def __init__(self, sample_sets, params, marker_ids, dbh):
super().__init__()
self._sample_sets = sample_sets
self._params = params
self._total_samples = 0
self._sample_ids = set()
self._marker_ids = set(marker_ids) if marker_ids else None
for s in self._sample_sets:
if len(s) <= 0: continue
a_set = AnalyticalSet( s, params, marker_ids, dbh )
self.append( a_set )
self._total_samples += s.N
self._sample_ids.update( s.sample_ids )
# checking marker_ids consistency
if not self._marker_ids:
self._marker_ids = set(a_set.marker_ids)
elif (self._marker_ids ^ set(a_set.marker_ids)):
raise RuntimeError(
'Inconsistent marker set for sample set: %s' % a_set.label)
def get_sample_sets(self):
return self._sample_sets
def get_filtered_sample_ids(self):
""" return a filtered sample_ids, ie sample with minimum number of marker """
sample_ids = set()
for s in self:
sample_ids.update( s.get_filtered_sample_ids() )
return sample_ids
def get_filtered_marker_ids(self):
""" return a filtered marker_ids from total of all analytical sets """
marker_counts = self.assess_marker_quality()
threshold = self.total_samples * self._params.marker_qual_threshold
marker_ids = [ x[0] for x in marker_counts.items() if x[1] > threshold ]
return marker_ids
def assess_marker_quality(self):
""" assess marker quality """
# collect all necessary data from each analyticalset
marker_counts = defaultdict( int )
for s in self:
for (marker_id, count) in s.get_marker_genotyped_distribution():
marker_counts[marker_id] += count
return marker_counts
def get_filtered_analytical_sets(self):
""" return a filtered analytical set container """
raise NotImplementedError
@property
def total_samples(self):
return self._total_samples
@property
def sample_ids(self):
return self._sample_ids
@property
def marker_ids(self):
return self._marker_ids
def get_analytical_sets(dbh, sample_sets, params, marker_ids=None):
assert sample_sets and params
sets = AnalyticalSetContainer( sample_sets, params, marker_ids, dbh )
return sets
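# Editor's hedged sketch (not part of the original module): the pass/fail rule
# in AnalyticalSet.assess_sample_quality reduces to simple arithmetic -- a
# sample is kept when its number of genotyped markers is at least
# len(marker_ids) * sample_qual_threshold.  All names below are hypothetical.
def _example_sample_quality_rule():
    marker_ids = ['m1', 'm2', 'm3', 'm4']
    sample_marker = {1: {'m1', 'm2', 'm3', 'm4'}, 2: {'m1', 'm2'}, 3: {'m1'}}
    threshold = len(marker_ids) * 0.5          # sample_qual_threshold = 0.5
    return {s for s, markers in sample_marker.items()
            if len(markers) >= threshold}      # -> {1, 2}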
|
lgpl-3.0
|
RPGOne/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
84
|
7866
|
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
|
bsd-3-clause
|
beni55/hyperopt
|
hyperopt/tests/test_vectorize.py
|
7
|
7707
|
import numpy as np
from hyperopt.pyll import as_apply, scope, rec_eval, clone, dfs
from hyperopt.pyll.stochastic import recursive_set_rng_kwarg
from hyperopt import base, fmin, rand
from hyperopt.vectorize import VectorizeHelper
from hyperopt.vectorize import replace_repeat_stochastic
from hyperopt.pyll_utils import hp_choice
from hyperopt.pyll_utils import hp_uniform
from hyperopt.pyll_utils import hp_quniform
from hyperopt.pyll_utils import hp_loguniform
from hyperopt.pyll_utils import hp_qloguniform
def config0():
p0 = scope.uniform(0, 1)
p1 = scope.uniform(2, 3)
p2 = scope.one_of(-1, p0)
p3 = scope.one_of(-2, p1)
p4 = 1
p5 = [3, 4, p0]
p6 = scope.one_of(-3, p1)
d = locals()
d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
s = as_apply(d)
return s
def test_clone():
config = config0()
config2 = clone(config)
nodeset = set(dfs(config))
assert not any(n in nodeset for n in dfs(config2))
foo = recursive_set_rng_kwarg(
config,
scope.rng_from_seed(5))
r = rec_eval(foo)
print r
r2 = rec_eval(
recursive_set_rng_kwarg(
config2,
scope.rng_from_seed(5)))
print r2
assert r == r2
def test_vectorize_trivial():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = p0
print loss
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1)),
)
#print new_vc
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
p0dct = dict(zip(idxs['p0'], vals['p0']))
for ii, li in enumerate(losses):
assert p0dct[ii] == li
def test_vectorize_simple():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = p0 ** 2
print loss
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
fo2 = replace_repeat_stochastic(full_output)
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1)),
)
#print new_vc
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
p0dct = dict(zip(idxs['p0'], vals['p0']))
for ii, li in enumerate(losses):
assert p0dct[ii] ** 2 == li
def test_vectorize_multipath():
N = as_apply(15)
p0 = hp_uniform('p0', 0, 1)
loss = hp_choice('p1', [1, p0, -p0]) ** 2
expr_idxs = scope.range(N)
vh = VectorizeHelper(loss, expr_idxs, build=True)
vloss = vh.v_expr
print vloss
full_output = as_apply([vloss,
vh.idxs_by_label(),
vh.vals_by_label()])
new_vc = recursive_set_rng_kwarg(
full_output,
as_apply(np.random.RandomState(1)),
)
losses, idxs, vals = rec_eval(new_vc)
print 'losses', losses
print 'idxs p0', idxs['p0']
print 'vals p0', vals['p0']
print 'idxs p1', idxs['p1']
print 'vals p1', vals['p1']
p0dct = dict(zip(idxs['p0'], vals['p0']))
p1dct = dict(zip(idxs['p1'], vals['p1']))
for ii, li in enumerate(losses):
print ii, li
if p1dct[ii] != 0:
assert li == p0dct[ii] ** 2
else:
assert li == 1
def test_vectorize_config0():
p0 = hp_uniform('p0', 0, 1)
p1 = hp_loguniform('p1', 2, 3)
p2 = hp_choice('p2', [-1, p0])
p3 = hp_choice('p3', [-2, p1])
p4 = 1
p5 = [3, 4, p0]
p6 = hp_choice('p6', [-3, p1])
d = locals()
d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
config = as_apply(d)
N = as_apply('N:TBA')
expr = config
expr_idxs = scope.range(N)
vh = VectorizeHelper(expr, expr_idxs, build=True)
vconfig = vh.v_expr
full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])
if 1:
print '=' * 80
print 'VECTORIZED'
print full_output
print '\n' * 1
fo2 = replace_repeat_stochastic(full_output)
if 0:
print '=' * 80
print 'VECTORIZED STOCHASTIC'
print fo2
print '\n' * 1
new_vc = recursive_set_rng_kwarg(
fo2,
as_apply(np.random.RandomState(1))
)
if 0:
print '=' * 80
print 'VECTORIZED STOCHASTIC WITH RNGS'
print new_vc
Nval = 10
foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})
print 'foo[0]', foo[0]
print 'foo[1]', foo[1]
assert len(foo) == Nval
if 0: # XXX refresh these values to lock down sampler
assert foo[0] == {
'p0': 0.39676747423066994,
'p1': None,
'p2': 0.39676747423066994,
'p3': 2.1281244479293568,
'p4': 1,
'p5': (3, 4, 0.39676747423066994) }
assert foo[1] != foo[2]
print idxs
print vals['p3']
print vals['p6']
print idxs['p1']
print vals['p1']
assert len(vals['p3']) == Nval
assert len(vals['p6']) == Nval
assert len(idxs['p1']) < Nval
p1d = dict(zip(idxs['p1'], vals['p1']))
for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
if p3v == p6v == 0:
assert ii not in idxs['p1']
if p3v:
assert foo[ii]['p3'] == p1d[ii]
if p6v:
print 'p6', foo[ii]['p6'], p1d[ii]
assert foo[ii]['p6'] == p1d[ii]
def test_distributions():
# test that the distributions come out right
# XXX: test more distributions
space = {
'loss': (
hp_loguniform('lu', -2, 2) +
hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
hp_quniform('qu', -4.999, 5, 1) +
hp_uniform('u', 0, 10)),
'status': 'ok'}
trials = base.Trials()
N = 1000
fmin(lambda x: x,
space=space,
algo=rand.suggest,
trials=trials,
max_evals=N,
rstate=np.random.RandomState(124),
catch_eval_exceptions=False)
assert len(trials) == N
idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
print idxs.keys()
COUNTMAX = 130
COUNTMIN = 70
# -- loguniform
log_lu = np.log(vals['lu'])
assert len(log_lu) == N
assert -2 < np.min(log_lu)
assert np.max(log_lu) < 2
h = np.histogram(log_lu)[0]
print h
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
# -- quantized log uniform
qlu = vals['qlu']
assert np.all(np.fmod(qlu, 2) == 0)
assert np.min(qlu) == 2
assert np.max(qlu) == 20
bc_qlu = np.bincount(qlu)
assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]
# -- quantized uniform
qu = vals['qu']
assert np.min(qu) == -5
assert np.max(qu) == 5
assert np.all(np.fmod(qu, 1) == 0)
bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
assert np.all(40 < bc_qu), bc_qu # XXX: how to get the distribution flat
# with new rounding rule?
assert np.all(bc_qu < 125), bc_qu
assert np.all(bc_qu < COUNTMAX)
# -- uniform
u = vals['u']
assert np.min(u) > 0
assert np.max(u) < 10
h = np.histogram(u)[0]
print h
assert np.all(COUNTMIN < h)
assert np.all(h < COUNTMAX)
#import matplotlib.pyplot as plt
#plt.hist(np.log(vals['node_2']))
#plt.show()
|
bsd-3-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/recipes/placing_text_boxes.py
|
1
|
1835
|
"""
Placing text boxes
==================
When decorating axes with text boxes, two useful tricks are to place
the text in axes coordinates (see :ref:`sphx_glr_tutorials_advanced_transforms_tutorial.py`), so the
text doesn't move around with changes in x or y limits. You can also
use the ``bbox`` property of text to surround the text with a
:class:`~matplotlib.patches.Patch` instance -- the ``bbox`` keyword
argument takes a dictionary with keys that are Patch properties.
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(1234)
fig, ax = plt.subplots()
x = 30*np.random.randn(10000)
mu = x.mean()
median = np.median(x)
sigma = x.std()
textstr = '$\mu=%.2f$\n$\mathrm{median}=%.2f$\n$\sigma=%.2f$' % (mu, median, sigma)
ax.hist(x, 50)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
pltshow(plt)
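# Editor's hedged aside (not part of the original example): the same label drawn
# without transform=ax.transAxes is interpreted in data coordinates, so it moves
# whenever the x/y limits change; the axes-coordinate version above stays put.
fig2, ax2 = plt.subplots()
ax2.hist(x, 50)
ax2.text(0.05, 0.95, 'axes coords', transform=ax2.transAxes,
         verticalalignment='top', bbox=props)
ax2.text(mu, 100, 'data coords', bbox=props)
pltshow(plt)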
|
mit
|
wlamond/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
104
|
3139
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
nkmk/python-snippets
|
notebook/pandas_datareader_test.py
|
1
|
4388
|
import pandas_datareader.data as web
import datetime
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2015, 12, 31)
f = web.DataReader('SNE', 'yahoo', start, end)
print(f)
# Open High Low Close Adj Close Volume
# Date
# 2010-01-04 29.520000 30.180000 29.500000 30.020000 30.020000 988800
# 2010-01-05 29.719999 29.930000 29.500000 29.879999 29.879999 567800
# 2010-01-06 29.879999 29.950001 29.660000 29.850000 29.850000 468200
# 2010-01-07 29.740000 29.870001 29.590000 29.799999 29.799999 645300
# 2010-01-08 30.040001 30.469999 29.930000 30.410000 30.410000 574100
# ... ... ... ... ... ... ...
# 2015-12-24 24.580000 24.780001 24.570000 24.700001 24.700001 467400
# 2015-12-28 24.709999 24.790001 24.530001 24.670000 24.670000 590100
# 2015-12-29 24.860001 24.900000 24.680000 24.830000 24.830000 657600
# 2015-12-30 24.780001 24.900000 24.700001 24.719999 24.719999 344900
# 2015-12-31 24.670000 24.770000 24.510000 24.610001 24.610001 688900
# [1511 rows x 6 columns]
f = web.DataReader(['SNE', 'AAPL'], 'yahoo', start, end)
print(f['Adj Close'])
# AAPL SNE
# Date
# 2015-12-31 101.703697 24.610001
# 2015-12-30 103.694107 24.719999
# 2015-12-29 105.066116 24.830000
# 2015-12-28 103.210999 24.670000
# 2015-12-24 104.380112 24.700001
# ... ... ...
# 2010-01-08 27.244156 30.410000
# 2010-01-07 27.064222 29.799999
# 2010-01-06 27.114347 29.850000
# 2010-01-05 27.552608 29.879999
# 2010-01-04 27.505054 30.020000
# [1511 rows x 2 columns]
print(f)
# <class 'pandas.core.panel.Panel'>
# Dimensions: 6 (items) x 1511 (major_axis) x 2 (minor_axis)
# Items axis: Adj Close to Volume
# Major_axis axis: 2015-12-31 00:00:00 to 2009-12-31 00:00:00
# Minor_axis axis: AAPL to SNE
import matplotlib.pyplot as plt
f['Adj Close']['SNE'] /= f['Adj Close']['SNE'][-1]
f['Adj Close']['AAPL'] /= f['Adj Close']['AAPL'][-1]
f['Adj Close'].plot(title='SNE vs AAPL', grid=True)
# plt.show()
plt.savefig('data/dst/pandas_datareader_yahoo.png')
# 
from pandas_datareader import wb
f = wb.download(indicator='SP.POP.TOTL', country=['JP', 'US'],
start=1960, end=2014)
print(f)
# SP.POP.TOTL
# country year
# Japan 2014 127276000
# 2013 127445000
# 2012 127629000
# 2011 127833000
# 2010 128070000
# ... ...
# United States 1989 246819000
# 1988 244499000
# 1987 242289000
# ... ...
# 1962 186538000
# 1961 183691000
# 1960 180671000
# [110 rows x 1 columns]
print(wb.search('gdp.*capita.*const').iloc[:, :2])
# id name
# 685 6.0.GDPpc_constant GDP per capita, PPP (constant 2011 internation...
# 8086 NY.GDP.PCAP.KD GDP per capita (constant 2010 US$)
# 8088 NY.GDP.PCAP.KN GDP per capita (constant LCU)
# 8090 NY.GDP.PCAP.PP.KD GDP per capita, PPP (constant 2011 internation...
# 8091 NY.GDP.PCAP.PP.KD.87 GDP per capita, PPP (constant 1987 internation...
f = wb.download(indicator='SP.POP.TOTL', country=['JP', 'US'],
start=1960, end=2014)
f2 = f.unstack(level=0)
print(f2)
# SP.POP.TOTL
# country Japan United States
# year
# 1960 92500572 180671000
# 1961 94943000 183691000
# 1962 95832000 186538000
# 1963 96812000 189242000
# 1964 97826000 191889000
# ... ...
# 2010 128070000 309348193
# 2011 127833000 311663358
# 2012 127629000 313998379
# 2013 127445000 316204908
# 2014 127276000 318563456
f2.columns = ['Japan', 'United States']
f2.plot(grid=True)
# plt.show()
plt.savefig('data/dst/pandas_datareader_wb.png')
# 
|
mit
|
Insight-book/data-science-from-scratch
|
first-edition-ko/code/ch08_gradient_descent.py
|
3
|
6372
|
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
from collections import Counter
import math, random
from ch04_linear_algebra import distance, vector_subtract, scalar_multiply
# 8.1. The idea behind gradient descent
def sum_of_squares(v):
""" Computes the sum of squared elements in v """
return sum(v_i ** 2 for v_i in v)
# 8.2. Estimating the gradient
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
""" 그림 8-3. 나름 유용한 미분 근사값 """
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show that the two calculations give essentially the same results
import matplotlib.pyplot as plt
x = range(-10, 10)
plt.title("실제 미분값 vs. 근사값")
plt.plot(x, map(derivative, x), 'rx', label='실제값') # 빨간색 x
plt.plot(x, map(derivative_estimate, x), 'b+', label='근사값') # 파란색 +
plt.legend(loc=9) # 상단 중앙 범례
plt.show() # 보라색 *가 나와주길!
def partial_difference_quotient(f, v, i, h):
""" 함수 f의 i번째 편도함수가 v에서 가지는 값 """
w = [v_j + (h if j == i else 0) # h를 v의 i번째 변수에만 더해주자
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
# 8.3. Using the gradient
def step(v, direction, step_size):
""" move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
# 8.4. Choosing the right step size
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
# 8.5. Putting it all together
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
# 8.6. SGD
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
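# Editor's hedged sketch (not part of the original chapter code): a toy use of
# minimize_stochastic that fits a single parameter theta = [t] to the mean of y
# by minimising squared error; x_i is ignored.  Names are hypothetical and the
# code keeps the Python 2 style of the rest of the file.
def example_minimize_stochastic():
    xs = range(10)
    ys = [2 * x_i + 1 for x_i in xs]           # mean(ys) == 10
    def squared_error(x_i, y_i, theta):
        return (y_i - theta[0]) ** 2
    def squared_error_gradient(x_i, y_i, theta):
        return [-2 * (y_i - theta[0])]
    return minimize_stochastic(squared_error, squared_error_gradient,
                               xs, ys, [0.0])  # converges near [10.0]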
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
|
unlicense
|
interactiveaudiolab/nussl
|
docs/examples/benchmark/ideal_binary_mask.py
|
1
|
1348
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ideal Binary Mask
#
# +
import nussl
import matplotlib.pyplot as plt
import time
start_time = time.time()
def visualize_and_embed(sources):
plt.figure(figsize=(10, 6))
plt.subplot(211)
nussl.utils.visualize_sources_as_masks(sources,
y_axis='mel', db_cutoff=-40, alpha_amount=2.0)
plt.subplot(212)
nussl.utils.visualize_sources_as_waveform(
sources, show_legend=False)
plt.show()
nussl.play_utils.multitrack(sources)
musdb = nussl.datasets.MUSDB18(
download=True, sample_rate=16000,
strict_sample_rate = False
)
i = 39
item = musdb[i]
mix = item['mix']
source_names = sorted(list(item['sources'].keys()))
sources = [item['sources'][k] for k in source_names]
# -
separator = nussl.separation.benchmark.IdealBinaryMask(
mix, sources)
estimates = separator()
estimates = {
source_names[i]: e for i, e in enumerate(estimates)
}
visualize_and_embed(estimates)
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
|
mit
|
google-coral/demo-manufacturing
|
models/retraining/train_classifier.py
|
1
|
9469
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import load_delegate
from tflite_runtime.interpreter import Interpreter
import glob
import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
input_size = (224, 224)
input_shape = (224, 224, 3)
batch_size = 1
###########################################################################################
# Load pretrained model
###########################################################################################
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
classifier_activation='softmax',
weights='imagenet')
# Freeze first 100 layers
base_model.trainable = True
for layer in base_model.layers[:100]:
layer.trainable = False
model = tf.keras.Sequential([
base_model,
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(units=2, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(lr=1e-5),
metrics=['accuracy'])
print(model.summary())
###########################################################################################
# Prepare Datasets
###########################################################################################
train_datagen = ImageDataGenerator(rescale=1./255,
zoom_range=0.3,
rotation_range=50,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
dataset_path = './dataset'
train_set_path = os.path.join(dataset_path, 'train')
val_set_path = os.path.join(dataset_path, 'test')
batch_size = 64
train_generator = train_datagen.flow_from_directory(train_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_set_path,
target_size=input_size,
batch_size=batch_size,
class_mode='categorical')
epochs = 15
history = model.fit(train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
verbose=1)
###########################################################################################
# Plotting Train Data
###########################################################################################
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
plt.savefig('history.png')
###########################################################################################
# Post Training Quantization
###########################################################################################
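# Full-integer post-training quantization has to calibrate the dynamic range of
# every activation tensor, so the converter runs a small set of real inputs
# through the float model. The generator below supplies ~100 preprocessed test
# images for that calibration; restricting supported_ops to TFLITE_BUILTINS_INT8
# with uint8 input/output yields a model the Edge TPU compiler can map to the
# accelerator (assuming all of its ops are supported there).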
def representative_data_gen():
dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*')
for i in range(100):
image = next(iter(dataset_list))
image = tf.io.read_file(image)
image = tf.io.decode_jpeg(image, channels=3)
image = tf.image.resize(image, input_size)
image = tf.cast(image / 255., tf.float32)
image = tf.expand_dims(image, 0)
yield [image]
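# Pin the batch dimension to 1 before conversion; the Edge TPU compiler expects
# fully static tensor shapes, so a dynamic batch size would not be resolvable.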
model.input.set_shape((1,) + model.input.shape[1:])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
###########################################################################################
# Saving models
###########################################################################################
model.save('classifier.h5')
with open('classifier.tflite', 'wb') as f:
f.write(tflite_model)
###########################################################################################
# Evaluating h5
###########################################################################################
batch_images, batch_labels = next(val_generator)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('classifier_labels.txt', 'w') as f:
f.write(labels)
logits = model(batch_images)
prediction = np.argmax(logits, axis=1)
truth = np.argmax(batch_labels, axis=1)
keras_accuracy = tf.keras.metrics.Accuracy()
keras_accuracy(prediction, truth)
###########################################################################################
# Evaluating tflite
###########################################################################################
def set_input_tensor(interpreter, input):
input_details = interpreter.get_input_details()[0]
tensor_index = input_details['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
scale, zero_point = input_details['quantization']
input_tensor[:, :] = np.uint8(input / scale + zero_point)
def classify_image(interpreter, input):
set_input_tensor(interpreter, input)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = interpreter.get_tensor(output_details['index'])
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
top_1 = np.argmax(output)
return top_1
interpreter = tf.lite.Interpreter('classifier.tflite')
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
prediction = classify_image(interpreter, batch_images[i])
batch_prediction.append(prediction)
# Compare all predictions to the ground truth
tflite_accuracy = tf.keras.metrics.Accuracy()
tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Compiles model
###########################################################################################
subprocess.call(["edgetpu_compiler",
"--show_operations",
"classifier.tflite"])
###########################################################################################
# Evaluating tflite
###########################################################################################
interpreter = Interpreter('classifier_edgetpu.tflite', experimental_delegates=[
load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
prediction = classify_image(interpreter, batch_images[i])
batch_prediction.append(prediction)
# Compare all predictions to the ground truth
edgetpu_tflite_accuracy = tf.keras.metrics.Accuracy()
edgetpu_tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Show Results
###########################################################################################
print("Raw model accuracy: {:.2%}".format(keras_accuracy.result()))
print("Quant TF Lite accuracy: {:.2%}".format(tflite_accuracy.result()))
print("EdgeTpu Quant TF Lite accuracy: {:.2%}".format(
edgetpu_tflite_accuracy.result()))
|
apache-2.0
|
asedunov/intellij-community
|
python/testData/debug/test_dataframe.py
|
23
|
1309
|
import pandas as pd
import numpy as np
df1 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22]})
print(df1) ###line 8
df2 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22],
'LABELS': ['A', 'B', 'C']})
print(df2) ##line 16
df3 = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
'Sales' : [13,6,16,8,4,3,1]})
table = pd.pivot_table(df3,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
table.stack('City')
print(df3)
df4 = pd.DataFrame({'row': np.random.random(10000),
'One_X': np.random.random(10000),
'One_Y': np.random.random(10000),
'Two_X': np.random.random(10000),
'Two_Y': np.random.random(10000),
'LABELS': ['A'] * 10000})
print(df4) ##line 31
|
apache-2.0
|
hugobowne/scikit-learn
|
benchmarks/bench_plot_svd.py
|
325
|
2899
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
|
bsd-3-clause
|
great-expectations/great_expectations
|
great_expectations/expectations/metrics/column_aggregate_metrics/column_values_between_count.py
|
1
|
9139
|
from typing import Any, Dict, Tuple
import numpy as np
from great_expectations.core.util import get_sql_dialect_floating_point_infinity_value
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import sa
from great_expectations.expectations.metrics.metric_provider import (
MetricProvider,
metric_value,
)
class ColumnValuesBetweenCount(MetricProvider):
"""This metric is an aggregate helper for rare cases."""
metric_name = "column_values.between.count"
value_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
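    # strict_min / strict_max switch the corresponding bound from inclusive
    # (<=, >=) to exclusive (<, >); at least one of min_value / max_value must
    # be supplied, otherwise the engine implementations below raise ValueError.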
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
(
df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
val = df[accessor_domain_kwargs["column"]]
        if min_value is not None and max_value is not None:
            if strict_min and strict_max:
                series = (min_value < val) & (val < max_value)
            elif strict_min:
                series = (min_value < val) & (val <= max_value)
            elif strict_max:
                series = (min_value <= val) & (val < max_value)
            else:
                series = (min_value <= val) & (val <= max_value)
elif min_value is None and max_value is not None:
if strict_max:
series = val < max_value
else:
series = val <= max_value
elif min_value is not None and max_value is None:
if strict_min:
series = min_value < val
else:
series = min_value <= val
else:
raise ValueError("unable to parse domain and value kwargs")
return np.count_nonzero(series)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
dialect_name = execution_engine.engine.dialect.name.lower()
if (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=True
)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=True
)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=False
)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=False
)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=True
)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=True
)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=False
)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=False
)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
(
selectable,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = sa.column(accessor_domain_kwargs["column"])
if min_value is None:
if strict_max:
condition = column < max_value
else:
condition = column <= max_value
elif max_value is None:
if strict_min:
condition = column > min_value
else:
condition = column >= min_value
else:
if strict_min and strict_max:
condition = sa.and_(column > min_value, column < max_value)
elif strict_min:
condition = sa.and_(column > min_value, column <= max_value)
elif strict_max:
condition = sa.and_(column >= min_value, column < max_value)
else:
condition = sa.and_(column >= min_value, column <= max_value)
return execution_engine.engine.execute(
sa.select([sa.func.count()]).select_from(selectable).where(condition)
).scalar()
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
(
df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = df[accessor_domain_kwargs["column"]]
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is None:
if strict_max:
condition = column < max_value
else:
condition = column <= max_value
elif max_value is None:
if strict_min:
condition = column > min_value
else:
condition = column >= min_value
else:
if strict_min and strict_max:
condition = (column > min_value) & (column < max_value)
elif strict_min:
condition = (column > min_value) & (column <= max_value)
elif strict_max:
condition = (column >= min_value) & (column < max_value)
else:
condition = (column >= min_value) & (column <= max_value)
return df.filter(condition).count()
|
apache-2.0
|
samuel1208/scikit-learn
|
sklearn/cluster/tests/test_bicluster.py
|
226
|
9457
|
"""Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
|
bsd-3-clause
|
MechCoder/scikit-garden
|
skgarden/mondrian/ensemble/tests/test_forest.py
|
1
|
10339
|
# Most tests copied verbatim from sklearn.ensemble.tests.test_forest.py
import pickle
import numpy as np
from sklearn.base import clone
from sklearn.base import ClassifierMixin
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from skgarden import MondrianForestClassifier
from skgarden import MondrianForestRegressor
boston = load_boston()
# The time of split and feature chosen for splitting are highly
# scale-sensitive.
scaler = MinMaxScaler()
X, y = boston.data, boston.target
y = np.round(y)
X = scaler.fit_transform(X)
ensembles = [
MondrianForestRegressor(random_state=0),
MondrianForestClassifier(random_state=0)]
def check_boston(est):
score = est.score(X, y)
assert_greater(score, 0.94, "Failed with score = %f" % score)
def test_boston():
mr = MondrianForestRegressor(n_estimators=5, random_state=0)
mr.fit(X, y)
check_boston(mr)
mr.partial_fit(X, y)
check_boston(mr)
def test_forest_attributes():
mr = MondrianForestRegressor(n_estimators=5, random_state=0)
mr.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(mr, "classes_"))
assert_false(hasattr(mr, "n_classes_"))
mr.partial_fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(mr, "classes_"))
assert_false(hasattr(mr, "n_classes_"))
mr = MondrianForestClassifier(n_estimators=5, random_state=0)
mr.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_true(hasattr(mr, "classes_"))
assert_true(hasattr(mr, "n_classes_"))
mr = MondrianForestClassifier(n_estimators=5, random_state=0)
mr.partial_fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_true(hasattr(mr, "classes_"))
assert_true(hasattr(mr, "n_classes_"))
def check_pickle(est):
score1 = est.score(X, y)
pickle_obj = pickle.dumps(est)
est2 = pickle.loads(pickle_obj)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score1, score2)
def test_pickle():
for est1 in ensembles:
est1.fit(X, y)
check_pickle(est1)
est1.partial_fit(X, y)
check_pickle(est1)
def test_parallel_train():
for curr_est in ensembles:
est = clone(curr_est)
y_pred = ([est.set_params(n_jobs=n_jobs).fit(X, y).predict(X)
for n_jobs in [1, 2, 4, 8]])
for pred1, pred2 in zip(y_pred, y_pred[1:]):
assert_array_equal(pred1, pred2)
y_pred = ([est.set_params(n_jobs=n_jobs).partial_fit(X, y).predict(X)
for n_jobs in [1, 2, 4, 8]])
for pred1, pred2 in zip(y_pred, y_pred[1:]):
assert_array_equal(pred1, pred2)
def test_min_samples_split():
min_samples_split = 4
for curr_ensemble in ensembles:
ensemble = clone(curr_ensemble)
ensemble.set_params(
min_samples_split=min_samples_split, n_estimators=100)
ensemble.fit(X, y)
for est in ensemble.estimators_:
n_samples = est.tree_.n_node_samples
leaves = est.tree_.children_left == -1
assert_true(np.all(n_samples[~leaves] >= min_samples_split))
def test_memory_layout():
for est in ensembles:
for dtype in [np.float32, np.float64]:
X_curr = np.asarray(X, dtype=dtype)
assert_array_almost_equal(est.fit(X_curr, y).predict(X_curr), y, 3)
assert_array_almost_equal(est.partial_fit(X_curr, y).predict(X_curr), y, 3)
# C-order
X_curr = np.asarray(X, order="C", dtype=dtype)
assert_array_almost_equal(est.fit(X_curr, y).predict(X_curr), y, 3)
assert_array_almost_equal(est.partial_fit(X_curr, y).predict(X_curr), y, 3)
X_curr = np.asarray(X, order="F", dtype=dtype)
assert_array_almost_equal(est.fit(X_curr, y).predict(X_curr), y, 3)
assert_array_almost_equal(est.partial_fit(X_curr, y).predict(X_curr), y, 3)
# Contiguous
X_curr = np.ascontiguousarray(X_curr, dtype=dtype)
assert_array_almost_equal(est.fit(X_curr, y).predict(X_curr), y, 3)
assert_array_almost_equal(est.partial_fit(X_curr, y).predict(X_curr), y, 3)
X_curr = np.array(X[::2], dtype=dtype)
y_curr = np.asarray(y[::2])
assert_array_almost_equal(
est.fit(X_curr, y_curr).predict(X_curr), y_curr, 3)
assert_array_almost_equal(
est.partial_fit(X_curr, y_curr).predict(X_curr), y_curr, 3)
def check_decision_path(ensemble):
indicator, col_inds = ensemble.decision_path(X)
indices, indptr = indicator.indices, indicator.indptr
n_nodes = [est.tree_.node_count for est in ensemble.estimators_]
assert_equal(indicator.shape[0], X.shape[0])
assert_equal(indicator.shape[1], sum(n_nodes))
assert_array_equal(np.diff(col_inds), n_nodes)
# Check that all leaf nodes are in the decision path.
leaf_indices = ensemble.apply(X) + np.reshape(col_inds[:-1], (1, -1))
for sample_ind, curr_leaf in enumerate(leaf_indices):
sample_indices = indices[indptr[sample_ind]: indptr[sample_ind + 1]]
assert_true(np.all(np.in1d(curr_leaf, sample_indices)))
def test_decision_path():
for ensemble in ensembles:
ensemble.fit(X, y)
check_decision_path(ensemble)
ensemble.partial_fit(X, y)
check_decision_path(ensemble)
def check_weighted_decision_path(ensemble, X_train, X_test):
    # decision_path is implemented in sklearn while weighted_decision_path is
    # implemented here, so check that the two stay consistent.
paths, col_inds = ensemble.decision_path(X_train)
weight_paths, weight_col_inds = ensemble.weighted_decision_path(X_train)
assert_array_equal(col_inds, weight_col_inds)
n_nodes = [est.tree_.node_count for est in ensemble.estimators_]
assert_equal(weight_paths.shape[0], X_train.shape[0])
assert_equal(weight_paths.shape[1], sum(n_nodes))
# We are calculating the weighted decision path on train data, so
# the weights should be concentrated at the leaves.
leaf_indices = ensemble.apply(X_train)
for est_ind, curr_leaf_indices in enumerate(leaf_indices.T):
curr_path = weight_paths[:, col_inds[est_ind]:col_inds[est_ind + 1]].toarray()
assert_array_equal(np.where(curr_path)[1], curr_leaf_indices)
# Sum of the weights across all the nodes in each estimator
# for each sample should sum up to 1.0
assert_array_almost_equal(
np.ravel(ensemble.weighted_decision_path(X_test)[0].sum(axis=1)),
ensemble.n_estimators * np.ones(X_test.shape[0]), 5)
def test_weighted_decision_path():
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.6, test_size=0.4)
for ensemble in ensembles:
ensemble.fit(X_train, y_train)
check_weighted_decision_path(ensemble, X_train, X_test)
ensemble.partial_fit(X_train, y_train)
check_weighted_decision_path(ensemble, X_train, X_test)
def check_mean_std_forest_regressor(est):
    # For points completely in the training data and max depth set to None,
    # the mean should converge to the actual target value and the variance
    # should converge to 0.0.
mean, std = est.predict(X, return_std=True)
assert_array_almost_equal(mean, y, 5)
assert_array_almost_equal(std, 0.0, 2)
# For points completely far away from the training data, this
# should converge to the empirical mean and variance.
    # X is scaled to the [0, 1] range by MinMaxScaler, so +/-30 lies far outside the data.
X_inf = np.vstack((30.0 * np.ones(X.shape[1]),
-30.0 * np.ones(X.shape[1])))
inf_mean, inf_std = est.predict(X_inf, return_std=True)
assert_array_almost_equal(inf_mean, y.mean(), 1)
assert_array_almost_equal(inf_std, y.std(), 2)
def test_mean_std_forest_regressor():
mfr = MondrianForestRegressor(random_state=0)
mfr.fit(X, y)
check_mean_std_forest_regressor(mfr)
mfr.partial_fit(X, y)
check_mean_std_forest_regressor(mfr)
def check_proba_classif_convergence(est, X_train, y_train):
lb = LabelBinarizer()
y_bin = lb.fit_transform(y_train)
le = LabelEncoder()
y_enc = le.fit_transform(y_train)
proba = est.predict_proba(X_train)
labels = est.predict(X_train)
assert_array_equal(proba, y_bin)
assert_array_equal(labels, lb.inverse_transform(y_bin))
# For points completely far away from the training data, this
# should converge to the empirical distribution of labels.
X_inf = np.vstack((30.0 * np.ones(X_train.shape[1]),
-30.0 * np.ones(X_train.shape[1])))
inf_proba = est.predict_proba(X_inf)
emp_proba = np.bincount(y_enc) / float(len(y_enc))
assert_array_almost_equal(inf_proba, [emp_proba, emp_proba], 3)
def test_proba_classif_convergence():
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.6, test_size=0.4)
mfc = MondrianForestClassifier(random_state=0)
mfc.fit(X_train, y_train)
check_proba_classif_convergence(mfc, X_train, y_train)
mfc.partial_fit(X_train, y_train)
check_proba_classif_convergence(mfc, X_train, y_train)
def test_tree_identical_labels():
rng = np.random.RandomState(0)
for ensemble in ensembles:
X = rng.randn(100, 5)
y = np.ones(100)
ensemble.fit(X, y)
for est in ensemble.estimators_:
assert_equal(est.tree_.n_node_samples, [100])
if isinstance(est, ClassifierMixin):
assert_equal(est.tree_.value, [[[100]]])
else:
assert_equal(est.tree_.value, [[[1.0]]])
X = np.reshape(np.linspace(0.0, 1.0, 100), (-1, 1))
y = np.array([0.0]*50 + [1.0]*50)
ensemble.fit(X, y)
for est in ensemble.estimators_:
leaf_ids = est.tree_.children_left == -1
assert_true(np.any(est.tree_.n_node_samples[leaf_ids] > 2))
|
bsd-3-clause
|
etkirsch/scikit-learn
|
sklearn/mixture/gmm.py
|
68
|
31091
|
"""
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
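        # Full or tied covariance: transform standard-normal draws with a
        # symmetric square root of covar, U * sqrt(s), obtained from the
        # eigendecomposition covar = U diag(s) U^T.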
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
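    # For reference: bic(X) = -2 * total log-likelihood + _n_parameters() * log(n_samples)
    # and aic(X) = -2 * total log-likelihood + 2 * _n_parameters(); lower is better.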
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
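    # For each component, evaluate log N(x; mu, Sigma) via the Cholesky factor L
    # of Sigma: log|Sigma| = 2 * sum(log(diag(L))), and the Mahalanobis term
    # (x - mu)^T Sigma^{-1} (x - mu) = ||L^{-1} (x - mu)||^2 via a triangular solve.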
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
            raise ValueError("'spherical' covars must have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
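# The short sketch below is illustrative only (it is not part of the original
# module): it expands a tied covariance template into every supported
# covariance_type and checks the resulting arrays with _validate_covars.
if __name__ == "__main__":
    _tied_template = np.eye(3) * 0.5  # (n_dim, n_dim) covariance template
    for _ctype in ('spherical', 'tied', 'diag', 'full'):
        _cv = distribute_covar_matrix_to_match_covariance_type(
            _tied_template, _ctype, n_components=2)
        _validate_covars(_cv, _ctype, n_components=2)  # raises on bad input
        print('%s covars shape: %s' % (_ctype, np.asarray(_cv).shape))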
|
bsd-3-clause
|
martndj/DaleyMenard1993
|
analysis.py
|
1
|
3006
|
'''
Compute the analysis (through direct inversion of the innovation covariance matrix B+R) and output the error reduction.
For both observation and forecast errors, statistics need to be provided:
- correlation model
- correlation length
- bias (0 by default)
- variance (constant on the domain)
By default (as is a common hypothesis in most contexts), the observation errors are uncorrelated.
What would be the impact of having correlated observation errors? The impact of biases?
'''
import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
from DM93 import Covariance, Uncorrelated, Foar, Soar, Gaussian
#====================================================================
#===| setup and configuration |======================================
execfile('config.py')
# -- observation errors
obsLc = None
obsCorr = Uncorrelated(grid, obsLc)
obsBias = 0.
obsVar = 1.
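# (Illustrative only, not part of the original experiment: to explore the
# questions raised in the module docstring, one could swap in a correlated
# observation-error model and/or a bias, e.g.
#   obsLc = grid.L/50.
#   obsCorr = Soar(grid, obsLc)
#   obsBias = 0.5
# and rerun the script to compare the resulting error reduction.)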
# -- forecast errors
fctLc = grid.L/20.
fctCorr = Soar(grid, fctLc)
fctBias = 0.
fctVar = 2.
# -- initial truth state
ampl = 10.
truth = ampl * np.exp(-grid.x**2/(grid.L/6.)**2)
#====================================================================
#===| computations |=================================================
# -- covariance matrices
B = Covariance(grid, fctVar * fctCorr.matrix)
R = Covariance(grid, obsVar * obsCorr.matrix)
# -- random error structures
fctErr = B.random(bias=fctBias)
obsErr = R.random(bias=obsBias)
# -- background state
xb = truth + fctErr
# -- observations
y = truth + obsErr
# -- analysis
SInv = np.linalg.inv(B.matrix+R.matrix)
K = B.matrix.dot(SInv)
dxa = K.dot(y-xb)
xa = xb + dxa
# -- reduction of error
error_b = grid.dx * np.sqrt(sum(fctErr**2))
error_a = grid.dx * np.sqrt(sum((xa-truth)**2))
print('background error = %.1e'%error_b)
print('analysis error = %.1e'%error_a)
print('error reduction = %.1f%%'%((error_b-error_a)/error_b*100.))
#====================================================================
#===| plots |========================================================
fig = plt.figure()
fig.subplots_adjust(wspace=0.05)
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
ax1.plot(grid.x, truth, color='k', linewidth=2, label='$x_t$')
ax1.plot(grid.x, xb, color='b', label='$x_b$')
ax1.plot(grid.x, y, color='g', marker='o', linestyle='none', label='$y$')
ax1.plot(grid.x, xa, color='r', linewidth=2, label='$x_a$')
ax2.plot( grid.x, y-xb, color='m', marker='o', markersize=4,
linestyle='none', label='$y-x_b$')
ax2.plot( grid.x, dxa, color='r', label='$\Delta x_a$')
ax2.plot( grid.x, fctErr, color='b', linestyle=':', linewidth=3,
label='$\epsilon_b$')
ax2.plot( grid.x, xa-truth, color='r', linestyle=':', linewidth=3,
label='$\epsilon_a$')
ax2.axhline(y=0, color='k')
xticklabels, xticks = grid.ticks(units=km)[:2]
ax1.set_xticks(xticks)
ax1.set_xticklabels(())
ax2.set_xlabel('$x$ [km]')
ax2.set_xticks(xticks)
ax2.set_xticklabels(xticklabels)
ax1.legend(loc='best')
ax2.legend(loc='best')
plt.show()
|
gpl-3.0
|
omarocegueda/dipy
|
doc/examples/linear_fascicle_evaluation.py
|
4
|
11614
|
"""
=================================================
Linear fascicle evaluation (LiFE)
=================================================
Evaluating the results of tractography algorithms is one of the biggest
challenges for diffusion MRI. One proposal for evaluation of tractography
results is to use a forward model that predicts the signal from each of a set of
streamlines, and then fit a linear model to these simultaneous predictions
[Pestilli2014]_.
We will use streamlines generated using probabilistic tracking on CSA
peaks. For brevity, we will include in this example only streamlines going
through the corpus callosum connecting left to right superior frontal
cortex. The process of tracking and finding these streamlines is fully
demonstrated in the `streamline_tools.py` example. If this example has been
run, we can read the streamlines from file. Otherwise, we'll run that example
first, by importing it. This provides us with all of the variables that were
created in that example:
"""
import numpy as np
import os.path as op
import nibabel as nib
import dipy.core.optimize as opt
if not op.exists('lr-superiorfrontal.trk'):
from streamline_tools import *
else:
# We'll need to know where the corpus callosum is from these variables:
from dipy.data import (read_stanford_labels,
fetch_stanford_t1,
read_stanford_t1)
hardi_img, gtab, labels_img = read_stanford_labels()
labels = labels_img.get_data()
cc_slice = labels == 2
fetch_stanford_t1()
t1 = read_stanford_t1()
t1_data = t1.get_data()
data = hardi_img.get_data()
# Read the candidates from file in voxel space:
candidate_sl = [s[0] for s in nib.trackvis.read('lr-superiorfrontal.trk',
points_space='voxel')[0]]
"""
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):
"""
"""
Let's visualize the initial candidate group of streamlines in 3D, relative to the
anatomical structure of this brain:
"""
from dipy.viz.colormap import line_colors
from dipy.viz import fvtk
candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
line_colors(candidate_sl))
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
opacities=[1.])
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
# Add display objects to canvas
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
fvtk.record(ren, n_frames=1, out_path='life_candidates.png',
size=(800, 800))
"""
.. figure:: life_candidates.png
:align: center
   **Candidate connectome before LiFE optimization**
"""
"""
Next, we initialize a LiFE model. We import the `dipy.tracking.life` module,
which contains the classes and functions that implement the model:
"""
import dipy.tracking.life as life
fiber_model = life.FiberModel(gtab)
"""
Since we read the streamlines from a file, already in the voxel space, we do not
need to transform them into this space. Otherwise, if the streamline coordinates
were in the world space (relative to the scanner iso-center, or relative to the
mid-point of the AC-PC-connecting line), we would compute::

   inv_affine = np.linalg.inv(hardi_img.affine)

that is, the inverse transformation from world space to voxel space, and pass it
as the affine for the following model fit.
The next step is to fit the model, producing a `FiberFit` class instance, that
stores the data, as well as the results of the fitting procedure.
The LiFE model posits that the signal in the diffusion MRI volume can be
explained by the streamlines, by the equation
.. math::
y = X\beta
Where $y$ is the diffusion MRI signal, $\beta$ are a set of weights on the
streamlines and $X$ is a design matrix. This matrix has the dimensions $m$ by
$n$, where $m=n_{voxels} \cdot n_{directions}$, and $n_{voxels}$ is the number of
voxels in the ROI that contains the streamlines considered in this model. The
$i^{th}$ column of the matrix contains the expected contributions of the
$i^{th}$ streamline (arbitrarily ordered) to each of the voxels. $X$ is a sparse
matrix, because each streamline traverses only a small percentage of the
voxels. The expected contributions of the streamline are calculated using a
forward model, where each node of the streamline is modeled as a cylindrical
fiber compartment with Gaussian diffusion, using the diffusion tensor model. See
[Pestilli2014]_ for more detail on the model, and variations of this model.
"""
fiber_fit = fiber_model.fit(data, candidate_sl, affine=np.eye(4))
"""
The `FiberFit` class instance holds various properties of the model fit. For
example, it has the weights $\beta$, that are assigned to each streamline. In
most cases, a tractography through some region will include redundant
streamlines, and these streamlines will have $\beta_i$ that are 0.
"""
import matplotlib.pyplot as plt
import matplotlib
fig, ax = plt.subplots(1)
ax.hist(fiber_fit.beta, bins=100, histtype='step')
ax.set_xlabel('Fiber weights')
ax.set_ylabel('# fibers')
fig.savefig('beta_histogram.png')
"""
.. figure:: beta_histogram.png
:align: center
**LiFE streamline weights**
"""
"""
We use $\beta$ to filter out these redundant streamlines, and generate an
optimized group of streamlines:
"""
optimized_sl = list(np.array(candidate_sl)[np.where(fiber_fit.beta>0)[0]])
ren = fvtk.ren()
fvtk.add(ren, fvtk.streamtube(optimized_sl, line_colors(optimized_sl)))
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path='life_optimized.png',
size=(800, 800))
"""
.. figure:: life_optimized.png
:align: center
**Streamlines selected via LiFE optimization**
"""
"""
The new set of streamlines should do well in fitting the data, and redundant
streamlines have presumably been removed (in this case, about 50% of the
streamlines).
But how well does the model do in explaining the diffusion data? We can
quantify that: the `FiberFit` class instance has a `predict` method, which can
be used to invert the model and predict back either the data that was used to
fit the model, or other unseen data (e.g. in cross-validation, see
:ref:`kfold_xval`).
Without arguments, the `.predict()` method will predict the diffusion signal
for the same gradient table that was used in the fit data, but `gtab` and `S0`
key-word arguments can be used to predict for other acquisition schemes and
other baseline non-diffusion-weighted signals.
"""
model_predict = fiber_fit.predict()
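# (A hypothetical call for a different acquisition scheme might look like
# `fiber_fit.predict(gtab=other_gtab, S0=other_S0)`, where `other_gtab` and
# `other_S0` are placeholders that are not defined in this example.)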
"""
We will focus on the error in prediction of the diffusion-weighted data, and
calculate the root of the mean squared error.
"""
model_error = model_predict - fiber_fit.data
model_rmse = np.sqrt(np.mean(model_error[:, 10:] ** 2, -1))
"""
As a baseline against which we can compare, we calculate another error term. In
this case, we assume that the weight for each streamline is equal
to zero. This produces the naive prediction of the mean of the signal in each
voxel.
"""
beta_baseline = np.zeros(fiber_fit.beta.shape[0])
pred_weighted = np.reshape(opt.spdot(fiber_fit.life_matrix, beta_baseline),
(fiber_fit.vox_coords.shape[0],
np.sum(~gtab.b0s_mask)))
mean_pred = np.empty((fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
S0 = fiber_fit.b0_signal
"""
Since the fitting is done in the demeaned S/S0 domain, we need
to add back the mean and then multiply by S0 in every voxel:
"""
mean_pred[..., gtab.b0s_mask] = S0[:, None]
mean_pred[..., ~gtab.b0s_mask] =\
(pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None]
mean_error = mean_pred - fiber_fit.data
mean_rmse = np.sqrt(np.mean(mean_error ** 2, -1))
"""
First, we can compare the overall distribution of errors between these two
alternative models of the ROI. We show the distribution of differences in error
(improvement through model fitting, relative to the baseline model). Here,
positive values denote an improvement in error with model fit, relative to
without the model fit.
"""
fig, ax = plt.subplots(1)
ax.hist(mean_rmse - model_rmse, bins=100, histtype='step')
ax.text(0.2, 0.9,'Median RMSE, mean model: %.2f' % np.median(mean_rmse),
horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
ax.text(0.2, 0.8,'Median RMSE, LiFE: %.2f' % np.median(model_rmse),
horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
ax.set_xlabel('RMS Error')
ax.set_ylabel('# voxels')
fig.savefig('error_histograms.png')
"""
.. figure:: error_histograms.png
:align: center
**Improvement in error with fitting of the LiFE model**.
"""
"""
Second, we can show the spatial distribution of the two error terms,
and of the improvement with the model fit:
"""
vol_model = np.ones(data.shape[:3]) * np.nan
vol_model[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = model_rmse
vol_mean = np.ones(data.shape[:3]) * np.nan
vol_mean[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse
vol_improve = np.ones(data.shape[:3]) * np.nan
vol_improve[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse
sl_idx = 49
from mpl_toolkits.axes_grid1 import AxesGrid
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95)
ax = AxesGrid(fig, 111,
nrows_ncols = (1, 3),
label_mode = "1",
share_all = True,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad="5%")
ax[0].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[0].matshow(np.rot90(vol_model[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[0].colorbar(im)
ax[1].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[1].matshow(np.rot90(vol_mean[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[1].colorbar(im)
ax[2].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]), cmap=matplotlib.cm.RdBu)
ax.cbar_axes[2].colorbar(im)
for lax in ax:
lax.set_xticks([])
lax.set_yticks([])
fig.savefig("spatial_errors.png")
"""
.. figure:: spatial_errors.png
:align: center
**Spatial distribution of error and improvement**
"""
"""
This image demonstrates that in many places, fitting the LiFE model results in
substantial reduction of the error.
Note that for full-brain tractographies *LiFE* can require large amounts of
memory. For detailed memory profiling of the algorithm, based on the
streamlines generated in :ref:`example_probabilistic_fiber_tracking`, see `this
IPython notebook
<http://nbviewer.ipython.org/gist/arokem/bc29f34ebc97510d9def>`_.
For the Matlab implementation of LiFE, head over to `Franco Pestilli's github
webpage <http://francopestilli.github.io/life/>`_.
References
~~~~~~~~~~~~~~~~~~~~~~
.. [Pestilli2014] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell
B.A. (2014). Validation and statistical inference in living
connectomes. Nature Methods 11:
1058-1063. doi:10.1038/nmeth.3098
.. include:: ../links_names.inc
"""
|
bsd-3-clause
|
LPacademy/kaggle-challenge
|
Titanic/train_n_predict_mlp.py
|
2
|
3693
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:59:32 2017
@author: takashi
"""
from pre_process import pre_proc_all
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPClassifier as MLPClf
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "monospace"
plt.rcParams["font.size"] = 16
plt.rcParams["axes.grid"] = True
plt.rcParams["axes.facecolor"] = "white"
plt.rcParams["legend.fontsize"] = "small"
plt.rcParams["legend.loc"] = "best"
plt.rcParams["figure.facecolor"] = "white"
plt.rcParams["axes.titlesize"] = "medium"
import pandas as pd
import numpy as np
def predict_n_save(clf, predictor_df, file_path_save_results=None):
predict_arr = clf.predict(predictor_df)
# Convert to pandas dataframe from numpy array
temp_df = predictor_df.copy()
temp_df["Survived"] = predict_arr
predict_df = temp_df[["Survived"]].copy()
    if file_path_save_results is None:
print "Not saved predicted results."
else:
try:
predict_df.to_csv(file_path_save_results)
print "Saved predicted results in", file_path_save_results+"."
except:
print "Failed saving predicted results."
return predict_df
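# Hypothetical usage sketch (not executed here): given an already-fitted
# classifier `clf` and a predictor DataFrame `test_df`, one could call
#   predict_n_save(clf, test_df, file_path_save_results="predict_mlp.csv")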
if __name__=="__main__":
# Prepare train data and test data
train_df, train_target_df, test_df, predict_df = pre_proc_all()
# Train and evaluate the model
# hidden_layer_sizes_set, alpha_set = 2.0**np.mgrid[-10.:10.:3.,-10.:10.:3.]
alpha_set = 2.0**np.arange(-15, 5, 0.5)
cols_for_res = ["clf", "score_mean", "score_std"] #######
results_set = pd.DataFrame(columns=cols_for_res) #######
# for i, (hls, alpha) in enumerate(zip(hidden_layer_sizes_set.flatten(), alpha_set.flatten())):
for i, alpha in enumerate(alpha_set.flatten()):
# Define the classifyer model
clf = MLPClf(hidden_layer_sizes=(9, 9, 9),
alpha=alpha,
learning_rate_init=0.01
)
# Assess the model with cross validation data
tgt_arr = train_target_df.as_matrix().flatten()
scores = cross_val_score(clf, train_df, tgt_arr, cv=20)
temp = pd.DataFrame([[clf, scores.mean(), scores.std()]], columns=cols_for_res, index=[i])
results_set = results_set.append(temp) #######
print "{0:4d}/{1:4d} Param alpha: {2:0.2e}, ".format(i, alpha_set.size, alpha),
print "Score mean: {0:0.3f}, std: {1:0.3f}".format(scores.mean(), scores.std())
mean = results_set.score_mean
std = results_set.score_std
t = mean - 1.5*std
print results_set[t==t.max()][["score_mean","score_std"]]
plt.plot(mean)
# Plot images
# score_mean_set = mean.as_matrix().reshape(C_set.shape)
# score_std_set = std.as_matrix().reshape(C_set.shape)
# plt.figure("SVC", figsize=(16, 9))
# def subplots_im_data(subplot_pos, im_data, title=""):
# plt.subplot(subplot_pos)
# plt.imshow(im_data, interpolation="nearest", cmap="nipy_spectral")
# plt.title(title)
# plt.colorbar()
# subplots_im_data(231, np.log2(C_set), "Index of C, 2**x")
# subplots_im_data(232, np.log2(gamma_set), "Index of Gamma, 2**x")
# subplots_im_data(234, score_mean_set, "Mean of Score" )
# subplots_im_data(235, score_std_set, "Std of Score")
# subplots_im_data(236, score_mean_set-1.5*score_std_set, "Mean-1.5*Std")
# plt.show()
# Predict with the test.csv
# predict_df = predict_by_SVC_RBF(svc_best, test_df, file_path_save_results="predict.csv")
|
mit
|
DGrady/pandas
|
pandas/tests/io/parser/index_col.py
|
20
|
5352
|
# -*- coding: utf-8 -*-
"""
Tests that the specified index column (a.k.a 'index_col')
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
import pytest
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
class IndexColTests(object):
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n" # noqa
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
pytest.raises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
def test_index_col_is_true(self):
# see gh-9798
pytest.raises(ValueError, self.read_csv,
StringIO(self.ts_data), index_col=True)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
assert data.index.equals(Index(['foo', 'bar', 'baz']))
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame(
[], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
def test_empty_with_index_col_false(self):
# see gh-10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
garverp/gnuradio
|
gr-filter/examples/fir_filter_fff.py
|
47
|
4014
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_fff(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
jkthompson/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkagg.py
|
70
|
4184
|
"""
Render to gtk from agg
"""
from __future__ import division
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
        # must be initialized after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_gtkagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKAgg(thisFig)
    figmgr = FigureManagerGTKAgg(canvas, num)
    if DEBUG: print 'backend_gtkagg.new_figure_manager done'
    return figmgr
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print 'FigureCanvasGTKAgg.configure_event'
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print 'FigureCanvasGTKAgg.configure_event end'
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print 'FigureCanvasGTKAgg.render_figure'
FigureCanvasAgg.draw(self)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba(0,0)
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure done'
def blit(self, bbox=None):
if DEBUG: print 'FigureCanvasGTKAgg.blit'
if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print 'FigureCanvasGTKAgg.done'
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
|
gpl-3.0
|
swnesbitt/PyDisdrometer
|
pydisdrometer/plot/plot.py
|
2
|
1700
|
# -*- coding: utf-8 -*-
'''
Plotting routines for different aspects of the drop size distribution class.
This may eventually be moved into the class itself.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pylab import cm
def plot_dsd(dsd, range=None, log_scale=True, tighten=True):
'''Plotting function for drop size distribution Nd
plot_dsd creates a pcolor based plot for a drop size distribution object's
`Nd` field.
Parameters
----------
dsd: DropSizeDistribution
Drop Size Distribution instance containing a `Nd`.
range: tuple
A tuple containing the range to be plotted in form
(x_begin,x_end,y_begin,y_end)
log_scale: boolean
Whether to plot on a log scale, or a linear scale.
    tighten: boolean
        Whether to restrict the plot to areas with data.
Returns
-------
fig_handle: Figure Handle
'''
fig_handle = plt.figure()
colors = [('white')] + [(cm.jet(i)) for i in xrange(1, 256)]
new_map = matplotlib.colors.LinearSegmentedColormap.from_list('new_map',
colors, N=256)
plt.pcolor(dsd.time, dsd.diameter, np.log10(dsd.Nd.T), vmin=0.0,
figure=fig_handle, cmap=new_map)
plt.axis('tight')
if range:
plt.axis(range)
else:
plt.axis((0, dsd.time[-1], 0, dsd.diameter[-1]))
if tighten:
max_diameter = dsd.diameter[len(dsd.diameter) -
np.argmax(np.nansum(dsd.Nd, axis=0)[::-1] > 0)]
plt.ylim(0, max_diameter)
plt.colorbar()
plt.xlabel('Time(m)')
plt.ylabel('Diameter(mm)')
return fig_handle
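# Hypothetical usage sketch (not executed here; it assumes a
# DropSizeDistribution instance `dsd` with populated `time`, `diameter` and
# `Nd` fields, and the range values are made up):
#   fig = plot_dsd(dsd, range=(0, 60, 0, 8), log_scale=True, tighten=True)
#   fig.savefig('dsd_example.png')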
|
lgpl-2.1
|
theoryno3/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
75
|
40883
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
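# Tiny worked example of the ranking interpretation above (illustration only):
# with two positives scored 0.35 and 0.8 and two negatives scored 0.1 and 0.4,
# three of the four positive/negative pairs are ordered correctly, so the
# AUC is 3 / 4 = 0.75.
assert _auc(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8])) == 0.75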
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties:
    # basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 (ex aequo) are both counted as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at or above this
            # label (smaller or equal rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
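# Hypothetical spot-check (not part of the upstream test suite): a worked instance of
# the LRAP definition against one of the toy cases above. For y_true = [[1, 0, 1]] and
# y_score = [[0.75, 0.5, 0.25]], label 0 sits at rank 1 (1 relevant label at or above
# it -> 1/1) and label 2 at rank 3 (2 relevant labels at or above it -> 2/3), so
# LRAP = (1 + 2/3) / 2.
def _example_my_lrap_worked_case():
    assert_almost_equal(_my_lrap([[1, 0, 1]], [[0.75, 0.5, 0.25]]), (1 + 2 / 3) / 2)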
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
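# Hypothetical worked example (not an upstream test): coverage_error averages, over
# samples, how many of the top-ranked labels must be inspected to cover every relevant
# label. For y_true = [1, 1, 0] with y_score = [0, 1, 3] the relevant labels are ranked
# 3rd and 2nd, so 3 labels are needed, which is where the "3" terms above come from.
def _example_coverage_by_hand():
    y_true = np.array([1, 1, 0])
    y_score = np.array([0, 1, 3])
    # rank of each label = number of labels scoring at least as high
    ranks = np.array([(y_score >= s).sum() for s in y_score])
    assert ranks[y_true == 1].max() == 3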
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
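# Hypothetical worked example (not an upstream test): label_ranking_loss is the
# fraction of (relevant, irrelevant) label pairs that are ordered incorrectly, averaged
# over samples. For y_true = [1, 1, 0] with y_score = [0, 1, 3], the single irrelevant
# label outscores both relevant ones, giving the 2 / 2 term used above.
def _example_ranking_loss_by_hand():
    y_true = np.array([1, 1, 0])
    y_score = np.array([0, 1, 3])
    relevant = np.flatnonzero(y_true == 1)
    irrelevant = np.flatnonzero(y_true == 0)
    wrong_pairs = sum(
        int(y_score[i] > y_score[r]) for r in relevant for i in irrelevant
    )
    assert wrong_pairs / (len(relevant) * len(irrelevant)) == 2 / 2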
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
weissercn/learningml
|
learningml/GoF/analysis/S-4mu/plot_S-4mu_updated10_alphaSvalue_analysis.py
|
1
|
11120
|
from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
# Options for mode 'lower_level'
MODE = 'S-4mu_WigD'
label_size = 28
################################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf")
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
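# Quick illustrative check (not part of the original script; assumes 100 pseudo-experiments
# per point, matching the hard-coded 100. above): a rejection fraction of 0.5 carries an
# error bar of sqrt(0.5 * 0.5 / 100) = 0.05, while fractions of exactly 0 or 1 are
# regularised with 100/101 so the error bar never collapses to zero.
assert abs(binomial_error([0.5])[0] - 0.05) < 1e-9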
################################################################################################
# S - 4 mu WigD
################################################################################################
if MODE == 'S-4mu_WigD':
#param_list = [0.1,0.08,0.06,0.04,0.02,0.0]
param_list = [0.1,0.08,0.06,0.04]
param_list = [0.0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1]
param_list = [0.05,0.1,0.3,0.5,0.7,1.]
ml_classifiers = ['nn','bdt']
ml_classifiers_colors = ['green','magenta','cyan']
ml_classifiers_bin = 5
chi2_color = 'red'
chi2_splits = [1,2,3,4,5,6,7,8,9,10]
#chi2_splits = [8]
ml_folder_name = "S-4mu_WigD/evaluation_S-VV-4mu_WigD_updated10"
chi2_folder_name = "S-4mu_WigD"
#chi2_folder_name = "event_shapes_lower_level_without_Mult"
ml_file_name = "{1}_S-VV-4mu_WigD_updated10_{0}_syst_0_01__chi2scoring_5_p_values"
chi2_file_name = "S-4mu_WigD_updated10_{0}D_chi2_{1}_splits_p_values"
#chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4_without_Mult__{0}D_chi2_{1}_splits_p_values"
chi2_1D_file_name = "S-4mu_WigD_updated10_1D_{0}D_chi2_{1}_splits_p_values"
chi2_m1D_file_name = "S-4mu_WigD_updated10_m1D_{0}D_chi2_{1}_splits_p_values"
title = "S-4mu"
name = "S-4mu"
CL = 0.95
ml_classifiers_dict={}
chi2_splits_dict={}
chi2_1D_splits_dict={}
chi2_m1D_splits_dict={}
#xwidth = [0.5]*len(param_list)
xwidth = np.subtract(param_list[1:],param_list[:-1])/2.
xwidth_left = np.append(xwidth[0] , xwidth)
xwidth_right = np.append(xwidth,xwidth[-1])
print("xwidth : ", xwidth)
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
if False:
for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
ml_classifiers_dict[ml_classifier]= []
for param in param_list:
p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
ml_classifiers_dict[ml_classifier]= np.divide(ml_classifiers_dict[ml_classifier],100.)
ax.errorbar(param_list,ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$',clip_on=False)
print("bdt : ", ml_classifiers_dict['bdt'])
ax.errorbar(param_list,ml_classifiers_dict['bdt'], yerr=binomial_error(ml_classifiers_dict['bdt']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_splits_dict[str(chi2_split)]=[]
chi2_best = []
for param in param_list:
chi2_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_splits_dict[str(chi2_split)].append(temp)
chi2_best_dim.append(temp)
temp_best = np.max(chi2_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_1D_splits_dict[str(chi2_split)]=[]
chi2_1D_best = []
for param in param_list:
chi2_1D_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_1D_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_1D_splits_dict[str(chi2_split)].append(temp)
chi2_1D_best_dim.append(temp)
temp_best = np.max(chi2_1D_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_1D_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_m1D_splits_dict[str(chi2_split)]=[]
chi2_m1D_best = []
for param in param_list:
chi2_m1D_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_m1D_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_m1D_splits_dict[str(chi2_split)].append(temp)
chi2_m1D_best_dim.append(temp)
temp_best = np.max(chi2_m1D_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_m1D_best.append(temp_best)
#print("chi2_best : ",chi2_best)
print("param_list : ",param_list)
print("chi2_best : ", chi2_best)
print("chi2_splits_dict : ", chi2_splits_dict)
ax.errorbar(param_list,chi2_best, yerr=binomial_error(chi2_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='black', label=r'$\chi^2 w/\_mass$', clip_on=False)
ax.errorbar(param_list,chi2_1D_best, yerr=binomial_error(chi2_1D_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='blue', label=r'$\chi^2 only\_mass$', clip_on=False)
ax.errorbar(param_list,chi2_m1D_best, yerr=binomial_error(chi2_m1D_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='red', label=r'$\chi^2 w/o\_mass$', clip_on=False)
print("ml_classifiers_dict : ",ml_classifiers_dict)
print("chi2_best : ", chi2_best)
#ax.plot((0.1365,0.1365),(0.,1.),c="grey",linestyle="--")
ax.set_xlim([0.,1.])
#ax.set_xlim([0.129,0.1405])
ax.set_ylim([0.,1.])
ax.set_xlabel(r"$p_{signal}$")
ax.set_ylabel("Fraction rejected")
plt.legend(frameon=False, numpoints=1)
#a, b, c = [0.130,0.133], [0.1365],[0.14]
#ax.set_xticks(a+b+c)
#xx, locs = plt.xticks()
#ll = ['%.3f' % y for y in a] + ['%.4f' % y for y in b] + ['%.3f' % y for y in c]
#plt.xticks(xx, ll)
#ax.legend(loc='lower left', frameon=False, numpoints=1)
fig_leg = plt.figure(figsize=(8,2.7))
ax_leg = fig_leg.add_axes([0.0,0.0,1.0,1.0])
plt.tick_params(axis='x',which='both',bottom='off', top='off', labelbottom='off')
plt.tick_params(axis='y',which='both',bottom='off', top='off', labelbottom='off')
ax_leg.yaxis.set_ticks_position('none')
ax_leg.set_frame_on(False)
plt.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',frameon=False, numpoints=1,ncol=2)
fig_leg.savefig("S-4mu_WigD_updated10_analysis_legend.pdf")
#fig_name=name+"_alphaSvalue_analysis"
fig_name="S-4mu_WigD_updated10_analysis"
fig.savefig(fig_name+".pdf")
fig.savefig(fig_name+"_"+time.strftime("%b_%d_%Y")+".pdf")
print("Saved the figure as" , fig_name+".pdf")
|
mit
|
altairpearl/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
110
|
3768
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
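# Minimal illustrative sketch (not an upstream test): DictVectorizer maps a list of
# feature dicts to a matrix with one column per distinct key; absent keys become 0 and,
# with sort=True (the default), columns come out in sorted key order.
def _example_dictvectorizer_sketch():
    v = DictVectorizer(sparse=False)
    X = v.fit_transform([{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}])
    assert X.shape == (2, 3)
    assert v.get_feature_names() == ["bar", "baz", "foo"]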
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
jennyzhang0215/incubator-mxnet
|
example/rcnn/rcnn/core/tester.py
|
16
|
10270
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import time
import mxnet as mx
import numpy as np
from builtins import range
from .module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
    Generate detection results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
pickle.dump(imdb_boxes, f, pickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
pickle.dump(original_boxes, f, pickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
    Wrapper for calculating offline validation for faster data analysis.
    In this example, all thresholds are set by hand.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffle
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, protocol=pickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
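# Illustrative sketch (not part of the original module): the max_per_image cap in
# pred_eval keeps detections whose score is at least the max_per_image-th largest
# across all classes (ties may keep a few more). A hypothetical standalone version
# of that step:
def _example_cap_detections(scores, max_per_image):
    """Return a boolean mask over detections, mirroring the capping loop above."""
    if max_per_image <= 0 or len(scores) <= max_per_image:
        return np.ones(len(scores), dtype=bool)
    image_thresh = np.sort(scores)[-max_per_image]
    return scores >= image_thresh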
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
|
apache-2.0
|
iut-ibk/DynaMind-BasicModules
|
scripts/Modules/plotraster.py
|
2
|
2932
|
"""
@file
@author Christian Urich <[email protected]>
@version 1.0
@section LICENSE
This file is part of DynaMind
Copyright (C) 2012 Christian Urich
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from pydynamind import *
from numpy import *
from scipy import *
from matplotlib.pyplot import *
import os
import tempfile
class PlotRaster(Module):
def __init__(self):
Module.__init__(self)
self.vmin = 0
self.vmax = 0
self.createParameter("RasterDataName", STRING, "" )
self.RasterDataName = ""
self.vec = View("dummy", SUBSYSTEM, MODIFY)
self.offsetX = 0
self.OffsetY = 0
views = []
views.append(self.vec)
self.addData("System", views)
self.counter = 0
self.createParameter("Folder", STRING, "")
self.Folder = ""
def run(self):
fig = figure()
index = 1
f = fig.add_subplot(1,1 ,1)
r = self.getRasterData("System",View(self.RasterDataName, RASTERDATA, READ))
f = fig.add_subplot(1,index,1)
f.set_title(self.RasterDataName)
a = array([])
b = []
nameMap = ""
PlotStyle = ""
width = r.getWidth()
height = r.getHeight()
val = []
cval = array([])
a.resize(height, width)
for i in range(width):
for j in range(height):
a[j,i] = r.getValue(i,j) * 1
imshow(a, origin='lower', extent=[0,width,0,height], interpolation='nearest')
colorbar(ax = f, orientation='horizontal')
filename = "plot_"
filename+=str(self.counter).zfill(4)
filename+=".png"
savefig(str(self.Folder)+'/'+filename, dpi=720)
#fig.show()
close()
self.counter+=1
|
gpl-2.0
|
Windy-Ground/scikit-learn
|
sklearn/neighbors/classification.py
|
132
|
14388
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
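    # Illustrative sketch (not part of scikit-learn): the voting step above is a plain
    # (weighted) mode along the neighbour axis. For instance, with neighbour labels
    # [0, 1, 1] and inverse-distance weights [5., 1., 1.]:
    #
    #     mode, weight_sum = weighted_mode(np.array([[0, 1, 1]]),
    #                                      np.array([[5., 1., 1.]]), axis=1)
    #     # mode -> 0. (total weight 5.), whereas an unweighted vote would pick 1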
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
|
bsd-3-clause
|
pySTEPS/pysteps
|
pysteps/io/importers.py
|
1
|
54173
|
# -*- coding: utf-8 -*-
"""
pysteps.io.importers
====================
Methods for importing files containing two-dimensional radar mosaics.
The methods in this module implement the following interface::
import_xxx(filename, optional arguments)
where **xxx** is the name (or abbreviation) of the file format and filename
is the name of the input file.
The output of each method is a three-element tuple containing a two-dimensional
radar mosaic, the corresponding quality field and a metadata dictionary. If the
file contains no quality information, the quality field is set to None. Pixels
containing missing data are set to nan.
The metadata dictionary contains the following recommended key-value pairs:
.. tabularcolumns:: |p{2cm}|L|
+------------------+----------------------------------------------------------+
| Key | Value |
+==================+==========================================================+
| projection | PROJ.4-compatible projection definition |
+------------------+----------------------------------------------------------+
| x1 | x-coordinate of the lower-left corner of the data raster |
+------------------+----------------------------------------------------------+
| y1 | y-coordinate of the lower-left corner of the data raster |
+------------------+----------------------------------------------------------+
| x2 | x-coordinate of the upper-right corner of the data raster|
+------------------+----------------------------------------------------------+
| y2 | y-coordinate of the upper-right corner of the data raster|
+------------------+----------------------------------------------------------+
| xpixelsize | grid resolution in x-direction |
+------------------+----------------------------------------------------------+
| ypixelsize | grid resolution in y-direction |
+------------------+----------------------------------------------------------+
| cartesian_unit | the physical unit of the cartesian x- and y-coordinates: |
| | e.g. 'm' or 'km' |
+------------------+----------------------------------------------------------+
| yorigin | a string specifying the location of the first element in |
| | the data raster w.r.t. y-axis: |
| | 'upper' = upper border |
| | 'lower' = lower border |
+------------------+----------------------------------------------------------+
| institution | name of the institution who provides the data |
+------------------+----------------------------------------------------------+
| unit | the physical unit of the data: 'mm/h', 'mm' or 'dBZ' |
+------------------+----------------------------------------------------------+
| transform | the transformation of the data: None, 'dB', 'Box-Cox' or |
| | others |
+------------------+----------------------------------------------------------+
| accutime | the accumulation time in minutes of the data, float |
+------------------+----------------------------------------------------------+
| threshold | the rain/no rain threshold with the same unit, |
| | transformation and accutime of the data. |
+------------------+----------------------------------------------------------+
| zerovalue | the value assigned to the no rain pixels with the same |
| | unit, transformation and accutime of the data. |
+------------------+----------------------------------------------------------+
| zr_a | the Z-R constant a in Z = a*R**b |
+------------------+----------------------------------------------------------+
| zr_b | the Z-R exponent b in Z = a*R**b |
+------------------+----------------------------------------------------------+
Available Importers
-------------------
.. autosummary::
:toctree: ../generated/
import_bom_rf3
import_fmi_geotiff
import_fmi_pgm
import_knmi_hdf5
import_mch_gif
import_mch_hdf5
import_mch_metranet
import_mrms_grib
import_odim_hdf5
import_opera_hdf5
import_saf_crri
"""
import gzip
import os
from functools import partial
import numpy as np
from matplotlib.pyplot import imread
from pysteps.decorators import postprocess_import
from pysteps.exceptions import DataModelError
from pysteps.exceptions import MissingOptionalDependency
from pysteps.utils import aggregate_fields
try:
import gdalconst
from osgeo import gdal, osr
GDAL_IMPORTED = True
except ImportError:
GDAL_IMPORTED = False
try:
import h5py
H5PY_IMPORTED = True
except ImportError:
H5PY_IMPORTED = False
try:
import metranet
METRANET_IMPORTED = True
except ImportError:
METRANET_IMPORTED = False
try:
import netCDF4
NETCDF4_IMPORTED = True
except ImportError:
NETCDF4_IMPORTED = False
try:
from PIL import Image
PIL_IMPORTED = True
except ImportError:
PIL_IMPORTED = False
try:
import pyproj
PYPROJ_IMPORTED = True
except ImportError:
PYPROJ_IMPORTED = False
try:
import pygrib
PYGRIB_IMPORTED = True
except ImportError:
PYGRIB_IMPORTED = False
def _check_coords_range(selected_range, coordinate, full_range):
"""Check that the coordinates range arguments follow the expected pattern in
the **import_mrms_grib** function."""
if selected_range is None:
return sorted(full_range)
    if not isinstance(selected_range, (list, tuple)) or len(selected_range) != 2:
        raise ValueError(
            f"The {coordinate} range must be None or a two-element tuple or list"
        )
selected_range = list(selected_range) # Make mutable
for i in range(2):
if selected_range[i] is None:
selected_range[i] = full_range
selected_range.sort()
return tuple(selected_range)
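# Hypothetical usage sketch (not part of pysteps): a quick illustration of the helper
# above. Passing None returns the sorted full range, while a two-element tuple is
# sorted and returned as a tuple.
def _example_check_coords_range():
    assert _check_coords_range(None, "longitude", (300.0, 230.0)) == [230.0, 300.0]
    assert _check_coords_range((300.0, 250.0), "longitude", (230.0, 300.0)) == (250.0, 300.0)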
def _get_grib_projection(grib_msg):
"""Get the projection parameters from the grib file."""
projparams = grib_msg.projparams
    # Some versions of pygrib define the regular lat/lon projections as "cyl",
    # which causes errors in pyproj and cartopy. Here we replace it with "longlat".
if projparams["proj"] == "cyl":
projparams["proj"] = "longlat"
# Grib C tables (3-2)
# https://apps.ecmwf.int/codes/grib/format/grib2/ctables/3/2
# https://en.wikibooks.org/wiki/PROJ.4
_grib_shapes_of_earth = dict()
_grib_shapes_of_earth[0] = {"R": 6367470}
_grib_shapes_of_earth[1] = {"R": 6367470}
_grib_shapes_of_earth[2] = {"ellps": "IAU76"}
_grib_shapes_of_earth[4] = {"ellps": "GRS80"}
_grib_shapes_of_earth[5] = {"ellps": "WGS84"}
_grib_shapes_of_earth[6] = {"R": 6371229}
_grib_shapes_of_earth[8] = {"datum": "WGS84", "R": 6371200}
_grib_shapes_of_earth[9] = {"datum": "OSGB36"}
    # pygrib defines the ellipsoids using "a" and "b" only.
    # Here we replace them with the PROJ.4 spheroid codes if they are available.
if grib_msg["shapeOfTheEarth"] in _grib_shapes_of_earth:
keys_to_remove = ["a", "b"]
for key in keys_to_remove:
if key in projparams:
del projparams[key]
projparams.update(_grib_shapes_of_earth[grib_msg["shapeOfTheEarth"]])
return projparams
def _get_threshold_value(precip):
"""
    Get the rain/no rain threshold with the same unit, transformation and
accutime of the data.
If all the values are NaNs, the returned value is `np.nan`.
Otherwise, np.min(precip[precip > precip.min()]) is returned.
Returns
-------
threshold: float
"""
valid_mask = np.isfinite(precip)
if valid_mask.any():
_precip = precip[valid_mask]
min_precip = _precip.min()
above_min_mask = _precip > min_precip
if above_min_mask.any():
return np.min(_precip[above_min_mask])
else:
return min_precip
else:
return np.nan
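# Hypothetical usage sketch (not part of pysteps): the threshold is the smallest value
# strictly above the field minimum, falling back to the minimum itself (or NaN for an
# all-NaN field).
def _example_get_threshold_value():
    field = np.array([[0.0, 0.0, 0.2], [1.5, np.nan, 0.0]])
    assert _get_threshold_value(field) == 0.2
    assert np.isnan(_get_threshold_value(np.full((2, 2), np.nan)))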
@postprocess_import(dtype="float32")
def import_mrms_grib(filename, extent=None, window_size=4, **kwargs):
"""
Importer for NSSL's Multi-Radar/Multi-Sensor System
([MRMS](https://www.nssl.noaa.gov/projects/mrms/)) rainrate product
(grib format).
The rainrate values are expressed in mm/h, and the dimensions of the data
array are [latitude, longitude]. The first grid point (0,0) corresponds to
the upper left corner of the domain, while (last i, last j) denote the
lower right corner.
Due to the large size of the dataset (3500 x 7000), a float32 type is used
by default to reduce the memory footprint. However, be aware that when this
    array is passed to a pysteps function, it may be converted to double
precision, doubling the memory footprint.
To change the precision of the data, use the *dtype* keyword.
Also, by default, the original data is downscaled by 4
(resulting in a ~4 km grid spacing).
In case that the original grid spacing is needed, use `window_size=1`.
    But be aware that a single composite in double precision will
require 186 Mb of memory.
Finally, if desired, the precipitation data can be extracted over a
sub region of the full domain using the `extent` keyword.
By default, the entire domain is returned.
Notes
-----
In the MRMS grib files, "-3" is used to represent "No Coverage" or
"Missing data". However, in this reader replace those values by the value
specified in the `fillna` argument (NaN by default).
Note that "missing values" are not the same as "no precipitation" values.
Missing values indicates regions with no valid measures.
While zero precipitation indicates regions with valid measurements,
but with no precipitation detected.
Parameters
----------
filename: str
Name of the file to import.
extent: None or array-like
Longitude and latitude range (in degrees) of the data to be retrieved.
(min_lon, max_lon, min_lat, max_lat).
By default (None), the entire domain is retrieved.
The extent can be in any form that can be converted to a flat array
of 4 elements array (e.g., lists or tuples).
window_size: array_like or int
Array containing down-sampling integer factor along each axis.
If an integer value is given, the same block shape is used for all the
image dimensions.
Default: window_size=4.
{extra_kwargs_doc}
Returns
-------
precipitation: 2D array, float32
Precipitation field in mm/h. The dimensions are [latitude, longitude].
The first grid point (0,0) corresponds to the upper left corner of the
domain, while (last i, last j) denote the lower right corner.
quality: None
        Not implemented.
metadata: dict
Associated metadata (pixel sizes, map projections, etc.).
"""
del kwargs
if not PYGRIB_IMPORTED:
raise MissingOptionalDependency(
"pygrib package is required to import NCEP's MRMS products but it is not installed"
)
try:
grib_file = pygrib.open(filename)
except OSError:
raise OSError(f"Error opening NCEP's MRMS file. " f"File Not Found: {filename}")
if isinstance(window_size, int):
window_size = (window_size, window_size)
if extent is not None:
extent = np.asarray(extent)
if (extent.ndim != 1) or (extent.size != 4):
raise ValueError(
"The extent must be None or a flat array with 4 elements.\n"
f"Received: extent.shape = {str(extent.shape)}"
)
    # The MRMS grib file contains one message with the precipitation intensity
grib_file.rewind()
grib_msg = grib_file.read(1)[0] # Read the only message
# -------------------------
# Read the grid information
lr_lon = grib_msg["longitudeOfLastGridPointInDegrees"]
lr_lat = grib_msg["latitudeOfLastGridPointInDegrees"]
ul_lon = grib_msg["longitudeOfFirstGridPointInDegrees"]
ul_lat = grib_msg["latitudeOfFirstGridPointInDegrees"]
# Ni - Number of points along a latitude circle (west-east)
# Nj - Number of points along a longitude meridian (south-north)
# The lat/lon grid has a 0.01 degrees spacing.
lats = np.linspace(ul_lat, lr_lat, grib_msg["Nj"])
lons = np.linspace(ul_lon, lr_lon, grib_msg["Ni"])
precip = grib_msg.values
no_data_mask = precip == -3 # Missing values
# Create a function with default arguments for aggregate_fields
block_reduce = partial(aggregate_fields, method="mean", trim=True)
if window_size != (1, 1):
# Downscale data
lats = block_reduce(lats, window_size[0])
lons = block_reduce(lons, window_size[1])
# Update the limits
ul_lat, lr_lat = lats[0], lats[-1] # Lat from North to south!
ul_lon, lr_lon = lons[0], lons[-1]
precip[no_data_mask] = 0 # block_reduce does not handle nan values
precip = block_reduce(precip, window_size, axis=(0, 1))
# If a single invalid observation is located in the block,
# mark the whole block as invalid.
no_data_mask = block_reduce(
no_data_mask.astype("int"), window_size, axis=(0, 1)
).astype(bool)
lons, lats = np.meshgrid(lons, lats)
precip[no_data_mask] = np.nan
if extent is not None:
# clip domain
ul_lon, lr_lon = _check_coords_range(
(extent[0], extent[1]), "longitude", (ul_lon, lr_lon)
)
lr_lat, ul_lat = _check_coords_range(
(extent[2], extent[3]), "latitude", (ul_lat, lr_lat)
)
mask_lat = (lats >= lr_lat) & (lats <= ul_lat)
mask_lon = (lons >= ul_lon) & (lons <= lr_lon)
nlats = np.count_nonzero(mask_lat[:, 0])
nlons = np.count_nonzero(mask_lon[0, :])
precip = precip[mask_lon & mask_lat].reshape(nlats, nlons)
proj_params = _get_grib_projection(grib_msg)
pr = pyproj.Proj(proj_params)
proj_def = " ".join([f"+{key}={value} " for key, value in proj_params.items()])
x1, y1 = pr(ul_lon, lr_lat)
x2, y2 = pr(lr_lon, ul_lat)
metadata = dict(
xpixelsize=grib_msg["iDirectionIncrementInDegrees"] * window_size[0],
ypixelsize=grib_msg["jDirectionIncrementInDegrees"] * window_size[1],
unit="mm/h",
transform=None,
zerovalue=0,
projection=proj_def.strip(),
yorigin="upper",
threshold=_get_threshold_value(precip),
x1=x1,
x2=x2,
y1=y1,
y2=y2,
cartesian_unit="degrees",
)
return precip, None, metadata
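# The sketch below is an editor-added illustration (not part of the original
# module) of how the MRMS importer defined above might be called with the
# `window_size` and `extent` keywords described in its docstring. The file name
# is hypothetical and the function name `import_mrms_grib` is assumed to match
# the pysteps importer this code comes from; pygrib must be installed.
def _example_mrms_usage():  # illustrative only, never called
    precip, _, metadata = import_mrms_grib(
        "PrecipRate_00.00_20230101-000000.grib2",  # hypothetical file name
        extent=(-100.0, -90.0, 30.0, 40.0),  # (min_lon, max_lon, min_lat, max_lat)
        window_size=4,  # down-sample the 0.01 degree data to a ~4 km grid
    )
    return precip.shape, metadata["xpixelsize"]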
@postprocess_import()
def import_bom_rf3(filename, **kwargs):
"""Import a NetCDF radar rainfall product from the BoM Rainfields3.
Parameters
----------
filename: str
Name of the file to import.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the rainfall field in mm/h imported
from the Bureau RF3 netcdf, the quality field and the metadata. The
quality field is currently set to None.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required to import BoM Rainfields3 products "
"but it is not installed"
)
precip = _import_bom_rf3_data(filename)
geodata = _import_bom_rf3_geodata(filename)
metadata = geodata
# TODO(import_bom_rf3): Add missing georeferencing data.
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(precip)
metadata["threshold"] = _get_threshold_value(precip)
return precip, None, metadata
def _import_bom_rf3_data(filename):
ds_rainfall = netCDF4.Dataset(filename)
if "precipitation" in ds_rainfall.variables.keys():
precipitation = ds_rainfall.variables["precipitation"][:]
else:
precipitation = None
ds_rainfall.close()
return precipitation
def _import_bom_rf3_geodata(filename):
geodata = {}
ds_rainfall = netCDF4.Dataset(filename)
if "proj" in ds_rainfall.variables.keys():
projection = ds_rainfall.variables["proj"]
if getattr(projection, "grid_mapping_name") == "albers_conical_equal_area":
projdef = "+proj=aea "
lon_0 = getattr(projection, "longitude_of_central_meridian")
projdef += " +lon_0=" + f"{lon_0:.3f}"
lat_0 = getattr(projection, "latitude_of_projection_origin")
projdef += " +lat_0=" + f"{lat_0:.3f}"
standard_parallels = getattr(projection, "standard_parallel")
projdef += " +lat_1=" + f"{standard_parallels[0]:.3f}"
projdef += " +lat_2=" + f"{standard_parallels[1]:.3f}"
else:
projdef = None
geodata["projection"] = projdef
if "valid_min" in ds_rainfall.variables["x"].ncattrs():
xmin = getattr(ds_rainfall.variables["x"], "valid_min")
xmax = getattr(ds_rainfall.variables["x"], "valid_max")
ymin = getattr(ds_rainfall.variables["y"], "valid_min")
ymax = getattr(ds_rainfall.variables["y"], "valid_max")
else:
xmin = min(ds_rainfall.variables["x"])
xmax = max(ds_rainfall.variables["x"])
ymin = min(ds_rainfall.variables["y"])
ymax = max(ds_rainfall.variables["y"])
xpixelsize = abs(ds_rainfall.variables["x"][1] - ds_rainfall.variables["x"][0])
ypixelsize = abs(ds_rainfall.variables["y"][1] - ds_rainfall.variables["y"][0])
factor_scale = 1.0
if "units" in ds_rainfall.variables["x"].ncattrs():
if getattr(ds_rainfall.variables["x"], "units") == "km":
factor_scale = 1000.0
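# Illustrative example: if the x axis is stored in km with valid_min = 145.0,
# geodata["x1"] below becomes 145.0 * 1000.0 = 145000.0 m.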
geodata["x1"] = xmin * factor_scale
geodata["y1"] = ymin * factor_scale
geodata["x2"] = xmax * factor_scale
geodata["y2"] = ymax * factor_scale
geodata["xpixelsize"] = xpixelsize * factor_scale
geodata["ypixelsize"] = ypixelsize * factor_scale
geodata["cartesian_unit"] = "m"
geodata["yorigin"] = "upper" # TODO(_import_bom_rf3_geodata): check this
# get the accumulation period
valid_time = None
if "valid_time" in ds_rainfall.variables.keys():
times = ds_rainfall.variables["valid_time"]
calendar = "standard"
if "calendar" in times.ncattrs():
calendar = times.calendar
valid_time = netCDF4.num2date(times[:], units=times.units, calendar=calendar)
start_time = None
if "start_time" in ds_rainfall.variables.keys():
times = ds_rainfall.variables["start_time"]
calendar = "standard"
if "calendar" in times.ncattrs():
calendar = times.calendar
start_time = netCDF4.num2date(times[:], units=times.units, calendar=calendar)
time_step = None
if start_time is not None:
if valid_time is not None:
time_step = (valid_time - start_time).seconds // 60
geodata["accutime"] = time_step
# get the unit of precipitation
if "units" in ds_rainfall.variables["precipitation"].ncattrs():
units = getattr(ds_rainfall.variables["precipitation"], "units")
if units in ("kg m-2", "mm"):
geodata["unit"] = "mm"
geodata["institution"] = "Commonwealth of Australia, Bureau of Meteorology"
ds_rainfall.close()
return geodata
@postprocess_import()
def import_fmi_geotiff(filename, **kwargs):
"""Import a reflectivity field (dBZ) from an FMI GeoTIFF file.
Parameters
----------
filename: str
Name of the file to import.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the precipitation field,
the associated quality field and metadata.
The quality field is currently set to None.
"""
if not GDAL_IMPORTED:
raise MissingOptionalDependency(
"gdal package is required to import "
"FMI's radar reflectivity composite in GeoTIFF format "
"but it is not installed"
)
f = gdal.Open(filename, gdalconst.GA_ReadOnly)
rb = f.GetRasterBand(1)
precip = rb.ReadAsArray()
mask = precip == 255
precip = precip.astype(float) * rb.GetScale() + rb.GetOffset()
precip = (precip - 64.0) / 2.0
precip[mask] = np.nan
sr = osr.SpatialReference()
pr = f.GetProjection()
sr.ImportFromWkt(pr)
projdef = sr.ExportToProj4()
gt = f.GetGeoTransform()
metadata = {}
metadata["projection"] = projdef
metadata["x1"] = gt[0]
metadata["y1"] = gt[3] + gt[5] * f.RasterYSize
metadata["x2"] = metadata["x1"] + gt[1] * f.RasterXSize
metadata["y2"] = gt[3]
metadata["xpixelsize"] = abs(gt[1])
metadata["ypixelsize"] = abs(gt[5])
if gt[5] < 0:
metadata["yorigin"] = "upper"
else:
metadata["yorigin"] = "lower"
metadata["institution"] = "Finnish Meteorological Institute"
metadata["unit"] = rb.GetUnitType()
metadata["transform"] = None
metadata["accutime"] = 5.0
metadata["threshold"] = _get_threshold_value(precip)
metadata["zerovalue"] = np.nanmin(precip)
metadata["cartesian_unit"] = "m"
return precip, None, metadata
@postprocess_import()
def import_fmi_pgm(filename, gzipped=False, **kwargs):
"""Import a 8-bit PGM radar reflectivity composite from the FMI archive.
Parameters
----------
filename: str
Name of the file to import.
gzipped: bool
If True, the input file is treated as a compressed gzip file.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the reflectivity composite in dBZ
and the associated quality field and metadata. The quality field is
currently set to None.
Notes
-----
Reading georeferencing metadata is supported only for stereographic
projection. For other projections, the keys related to georeferencing are
not set.
"""
if not PYPROJ_IMPORTED:
raise MissingOptionalDependency(
"pyproj package is required to import "
"FMI's radar reflectivity composite "
"but it is not installed"
)
if gzipped is False:
precip = imread(filename)
else:
precip = imread(gzip.open(filename, "r"))
pgm_metadata = _import_fmi_pgm_metadata(filename, gzipped=gzipped)
geodata = _import_fmi_pgm_geodata(pgm_metadata)
mask = precip == pgm_metadata["missingval"]
precip = precip.astype(float)
precip[mask] = np.nan
precip = (precip - 64.0) / 2.0
metadata = geodata
metadata["institution"] = "Finnish Meteorological Institute"
metadata["accutime"] = 5.0
metadata["unit"] = "dBZ"
metadata["transform"] = "dB"
metadata["zerovalue"] = np.nanmin(precip)
metadata["threshold"] = _get_threshold_value(precip)
metadata["zr_a"] = 223.0
metadata["zr_b"] = 1.53
return precip, None, metadata
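# Editor-added sketch (not part of the original module): how the `zr_a` and
# `zr_b` metadata written above could be used to convert the dBZ composite to
# rain rate through the standard Z = a * R**b relation. It assumes the module's
# `np` import and a (precip, metadata) pair returned by the importer above.
def _example_dbz_to_rain_rate(precip_dbz, metadata):  # illustrative only
    z_linear = 10.0 ** (precip_dbz / 10.0)  # dBZ -> linear reflectivity Z
    rain_rate = (z_linear / metadata["zr_a"]) ** (1.0 / metadata["zr_b"])  # mm/h
    return rain_rate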
def _import_fmi_pgm_geodata(metadata):
geodata = {}
projdef = ""
if "type" in metadata.keys() and metadata["type"][0] == "stereographic":
projdef += "+proj=stere "
projdef += " +lon_0=" + metadata["centrallongitude"][0] + "E"
projdef += " +lat_0=" + metadata["centrallatitude"][0] + "N"
projdef += " +lat_ts=" + metadata["truelatitude"][0]
# These are hard-coded because the projection definition
# is missing from the PGM files.
projdef += " +a=6371288"
projdef += " +x_0=380886.310"
projdef += " +y_0=3395677.920"
projdef += " +no_defs"
#
geodata["projection"] = projdef
ll_lon, ll_lat = [float(v) for v in metadata["bottomleft"]]
ur_lon, ur_lat = [float(v) for v in metadata["topright"]]
pr = pyproj.Proj(projdef)
x1, y1 = pr(ll_lon, ll_lat)
x2, y2 = pr(ur_lon, ur_lat)
geodata["x1"] = x1
geodata["y1"] = y1
geodata["x2"] = x2
geodata["y2"] = y2
geodata["cartesian_unit"] = "m"
geodata["xpixelsize"] = float(metadata["metersperpixel_x"][0])
geodata["ypixelsize"] = float(metadata["metersperpixel_y"][0])
geodata["yorigin"] = "upper"
return geodata
def _import_fmi_pgm_metadata(filename, gzipped=False):
metadata = {}
if not gzipped:
f = open(filename, "rb")
else:
f = gzip.open(filename, "rb")
file_line = f.readline()
while not file_line.startswith(b"#"):
file_line = f.readline()
while file_line.startswith(b"#"):
x = file_line.decode()
x = x[1:].strip().split(" ")
if len(x) >= 2:
k = x[0]
v = x[1:]
metadata[k] = v
else:
file_line = f.readline()
continue
file_line = f.readline()
file_line = f.readline().decode()
metadata["missingval"] = int(file_line)
f.close()
return metadata
@postprocess_import()
def import_knmi_hdf5(filename, qty="ACRR", accutime=5.0, pixelsize=1.0, **kwargs):
"""Import a precipitation or reflectivity field (and optionally the quality
field) from a HDF5 file conforming to the KNMI Data Centre specification.
Parameters
----------
filename: str
Name of the file to import.
qty: {'ACRR', 'DBZH'}
The quantity to read from the file. The currently supported identifiers
are: 'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity
(dBZ). The default value is 'ACRR'.
accutime: float
The accumulation time of the dataset in minutes. A 5 min accumulation
is used as default, but hourly, daily and monthly accumulations
are also available.
pixelsize: float
The pixel size of a raster cell in kilometers. The default value for the
KNMI datasets is a 1 km grid cell size, but datasets with 2.4 km pixel
size are also available.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing precipitation accumulation [mm] /
reflectivity [dBZ] of the KNMI product, the associated quality field
and metadata. The quality field is currently set to None.
Notes
-----
Every KNMI data type has a slightly different naming convention. The
standard setup is based on the accumulated rainfall product on 1 km2 spatial
and 5 min temporal resolution.
See https://data.knmi.nl/datasets?q=radar for a list of all available KNMI
radar data.
"""
# TODO: Add quality field.
if not H5PY_IMPORTED:
raise MissingOptionalDependency(
"h5py package is required to import "
"KNMI's radar datasets "
"but it is not installed"
)
if qty not in ["ACRR", "DBZH"]:
raise ValueError(
"unknown quantity %s: the available options are 'ACRR' and 'DBZH' "
)
####
# Precipitation fields
####
f = h5py.File(filename, "r")
dset = f["image1"]["image_data"]
precip_intermediate = np.copy(dset) # copy the content
# In case precip is a rainfall accumulation (ACRR), precip is divided by 100.0,
# because the data is saved as hundreds of mm (so, as integers). 65535 is
# the no data value. The precision of the data is two decimals (0.01 mm).
if qty == "ACRR":
precip = np.where(
precip_intermediate == 65535, np.NaN, precip_intermediate / 100.0
)
# In case reflectivities are imported, the no data value is 255. Values are
# saved as integers. The reflectivities are not directly saved in dBZ, but
# as: dBZ = 0.5 * pixel_value - 32.0 (this used to be 31.5).
if qty == "DBZH":
precip = np.where(
precip_intermediate == 255, np.NaN, precip_intermediate * 0.5 - 32.0
)
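# Worked examples of the two conversions above (illustrative raw counts):
# ACRR: a raw count of 250 becomes 250 / 100.0 = 2.50 mm; 65535 becomes NaN.
# DBZH: a raw count of 100 becomes 0.5 * 100 - 32.0 = 18.0 dBZ; 255 becomes NaN.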
if precip is None:
raise IOError("requested quantity not found")
####
# Meta data
####
metadata = {}
if qty == "ACRR":
unit = "mm"
transform = None
elif qty == "DBZH":
unit = "dBZ"
transform = "dB"
# The 'where' group of MCH and OPERA data is called 'geographic' in the
# KNMI data.
geographic = f["geographic"]
proj4str = geographic["map_projection"].attrs["projection_proj4_params"].decode()
pr = pyproj.Proj(proj4str)
metadata["projection"] = proj4str
# Get coordinates
latlon_corners = geographic.attrs["geo_product_corners"]
ll_lat = latlon_corners[1]
ll_lon = latlon_corners[0]
ur_lat = latlon_corners[5]
ur_lon = latlon_corners[4]
lr_lat = latlon_corners[7]
lr_lon = latlon_corners[6]
ul_lat = latlon_corners[3]
ul_lon = latlon_corners[2]
ll_x, ll_y = pr(ll_lon, ll_lat)
ur_x, ur_y = pr(ur_lon, ur_lat)
lr_x, lr_y = pr(lr_lon, lr_lat)
ul_x, ul_y = pr(ul_lon, ul_lat)
x1 = min(ll_x, ul_x)
y1 = min(ll_y, lr_y)
x2 = max(lr_x, ur_x)
y2 = max(ul_y, ur_y)
# Fill in the metadata
metadata["x1"] = x1
metadata["y1"] = y1
metadata["x2"] = x2
metadata["y2"] = y2
metadata["xpixelsize"] = pixelsize
metadata["ypixelsize"] = pixelsize
metadata["cartesian_unit"] = "km"
metadata["yorigin"] = "upper"
metadata["institution"] = "KNMI - Royal Netherlands Meteorological Institute"
metadata["accutime"] = accutime
metadata["unit"] = unit
metadata["transform"] = transform
metadata["zerovalue"] = 0.0
metadata["threshold"] = _get_threshold_value(precip)
metadata["zr_a"] = 200.0
metadata["zr_b"] = 1.6
f.close()
return precip, None, metadata
@postprocess_import()
def import_mch_gif(filename, product, unit, accutime, **kwargs):
"""Import a 8-bit gif radar reflectivity composite from the MeteoSwiss
archive.
Parameters
----------
filename: str
Name of the file to import.
product: {"AQC", "CPC", "RZC", "AZC"}
The name of the MeteoSwiss QPE product.\n
Currently supported products:
+------+----------------------------+
| Name | Product |
+======+============================+
| AQC | Acquire |
+------+----------------------------+
| CPC | CombiPrecip |
+------+----------------------------+
| RZC | Precip |
+------+----------------------------+
| AZC | RZC accumulation |
+------+----------------------------+
unit: {"mm/h", "mm", "dBZ"}
the physical unit of the data
accutime: float
the accumulation time in minutes of the data
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the precipitation field in mm/h imported
from a MeteoSwiss gif file and the associated quality field and metadata.
The quality field is currently set to None.
"""
if not PIL_IMPORTED:
raise MissingOptionalDependency(
"PIL package is required to import "
"radar reflectivity composite from MeteoSwiss"
"but it is not installed"
)
geodata = _import_mch_geodata()
metadata = geodata
# import gif file
img = Image.open(filename)
if product.lower() in ["azc", "rzc", "precip"]:
# convert 8-bit GIF colortable to RGB values
img_rgb = img.convert("RGB")
# load lookup table
if product.lower() == "azc":
lut_filename = os.path.join(
os.path.dirname(__file__), "mch_lut_8bit_Metranet_AZC_V104.txt"
)
else:
lut_filename = os.path.join(
os.path.dirname(__file__), "mch_lut_8bit_Metranet_v103.txt"
)
lut = np.genfromtxt(lut_filename, skip_header=1)
lut = dict(zip(zip(lut[:, 1], lut[:, 2], lut[:, 3]), lut[:, -1]))
# apply lookup table conversion
precip = np.zeros(len(img_rgb.getdata()))
for i, dn in enumerate(img_rgb.getdata()):
precip[i] = lut.get(dn, np.nan)
# convert to original shape
width, height = img.size
precip = precip.reshape(height, width)
# set values outside observational range to NaN,
# and values in non-precipitating areas to zero.
precip[precip < 0] = 0
precip[precip > 9999] = np.nan
elif product.lower() in ["aqc", "cpc", "acquire ", "combiprecip"]:
# convert digital numbers to physical values
img = np.array(img).astype(int)
# build lookup table [mm/5min]
lut = np.zeros(256)
a = 316.0
b = 1.5
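# One way to read the formula below: it inverts Z = a * R**b with the
# reflectivity expressed as dBZ = 0.5 * DN - 35.75, i.e.
# R = (10**(dBZ / 10) / a)**(1 / b). For example (illustrative), DN = 100
# gives dBZ = 14.25 and R ~ 0.19 (in the mm/5min units of this table).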
for i in range(256):
if (i < 2) or (i > 250 and i < 255):
lut[i] = 0.0
elif i == 255:
lut[i] = np.nan
else:
lut[i] = (10.0 ** ((i - 71.5) / 20.0) / a) ** (1.0 / b)
# apply lookup table
precip = lut[img]
else:
raise ValueError("unknown product %s" % product)
metadata["accutime"] = accutime
metadata["unit"] = unit
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(precip)
metadata["threshold"] = _get_threshold_value(precip)
metadata["institution"] = "MeteoSwiss"
metadata["product"] = product
metadata["zr_a"] = 316.0
metadata["zr_b"] = 1.5
return precip, None, metadata
@postprocess_import()
def import_mch_hdf5(filename, qty="RATE", **kwargs):
"""Import a precipitation field (and optionally the quality field) from a
MeteoSwiss HDF5 file conforming to the ODIM specification.
Parameters
----------
filename: str
Name of the file to import.
qty: {'RATE', 'ACRR', 'DBZH'}
The quantity to read from the file. The currently supported identifiers
are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
is 'RATE'.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the OPERA product for the requested
quantity and the associated quality field and metadata. The quality
field is read from the file if it contains a dataset whose quantity
identifier is 'QIND'.
"""
if not H5PY_IMPORTED:
raise MissingOptionalDependency(
"h5py package is required to import "
"radar reflectivity composites using ODIM HDF5 specification "
"but it is not installed"
)
if qty not in ["ACRR", "DBZH", "RATE"]:
raise ValueError(
"unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
)
f = h5py.File(filename, "r")
precip = None
quality = None
for dsg in f.items():
if dsg[0].startswith("dataset"):
what_grp_found = False
# check if the "what" group is in the "dataset" group
if "what" in list(dsg[1].keys()):
qty_, gain, offset, nodata, undetect = _read_mch_hdf5_what_group(
dsg[1]["what"]
)
what_grp_found = True
for dg in dsg[1].items():
if dg[0][0:4] == "data":
# check if the "what" group is in the "data" group
if "what" in list(dg[1].keys()):
(
qty_,
gain,
offset,
nodata,
undetect,
) = _read_mch_hdf5_what_group(dg[1]["what"])
elif not what_grp_found:
raise DataModelError(
"Non ODIM compliant file: "
"no what group found from {} "
"or its subgroups".format(dg[0])
)
if qty_.decode() in [qty, "QIND"]:
arr = dg[1]["data"][...]
mask_n = arr == nodata
mask_u = arr == undetect
mask = np.logical_and(~mask_u, ~mask_n)
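# Decode raw counts to physical values as raw * gain + offset; for the
# precipitation field both the "nodata" and "undetect" counts become NaN,
# while the raw QIND quality values are copied as stored.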
if qty_.decode() == qty:
precip = np.empty(arr.shape)
precip[mask] = arr[mask] * gain + offset
precip[mask_u] = np.nan
precip[mask_n] = np.nan
elif qty_.decode() == "QIND":
quality = np.empty(arr.shape, dtype=float)
quality[mask] = arr[mask]
quality[~mask] = np.nan
if precip is None:
raise IOError("requested quantity %s not found" % qty)
where = f["where"]
geodata = _import_mch_geodata()
metadata = geodata
# TODO: use those from the hdf5 file instead
# xpixelsize = where.attrs["xscale"] * 1000.0
# ypixelsize = where.attrs["yscale"] * 1000.0
# xsize = where.attrs["xsize"]
# ysize = where.attrs["ysize"]
if qty == "ACRR":
unit = "mm"
transform = None
elif qty == "DBZH":
unit = "dBZ"
transform = "dB"
else:
unit = "mm/h"
transform = None
if np.any(np.isfinite(precip)):
thr = np.nanmin(precip[precip > np.nanmin(precip)])
else:
thr = np.nan
metadata.update(
{
"yorigin": "upper",
"institution": "MeteoSwiss",
"accutime": 5.0,
"unit": unit,
"transform": transform,
"zerovalue": np.nanmin(precip),
"threshold": thr,
"zr_a": 316.0,
"zr_b": 1.5,
}
)
f.close()
return precip, quality, metadata
def _read_mch_hdf5_what_group(whatgrp):
qty = whatgrp.attrs["quantity"] if "quantity" in whatgrp.attrs.keys() else "RATE"
gain = whatgrp.attrs["gain"] if "gain" in whatgrp.attrs.keys() else 1.0
offset = whatgrp.attrs["offset"] if "offset" in whatgrp.attrs.keys() else 0.0
nodata = whatgrp.attrs["nodata"] if "nodata" in whatgrp.attrs.keys() else 0
undetect = whatgrp.attrs["undetect"] if "undetect" in whatgrp.attrs.keys() else -1.0
return qty, gain, offset, nodata, undetect
@postprocess_import()
def import_mch_metranet(filename, product, unit, accutime):
"""Import a 8-bit bin radar reflectivity composite from the MeteoSwiss
archive.
Parameters
----------
filename: str
Name of the file to import.
product: {"AQC", "CPC", "RZC", "AZC"}
The name of the MeteoSwiss QPE product.\n
Currently supported products:
+------+----------------------------+
| Name | Product |
+======+============================+
| AQC | Acquire |
+------+----------------------------+
| CPC | CombiPrecip |
+------+----------------------------+
| RZC | Precip |
+------+----------------------------+
| AZC | RZC accumulation |
+------+----------------------------+
unit: {"mm/h", "mm", "dBZ"}
the physical unit of the data
accutime: float
the accumulation time in minutes of the data
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the precipitation field in mm/h imported
from a MeteoSwiss gif file and the associated quality field and metadata.
The quality field is currently set to None.
"""
if not METRANET_IMPORTED:
raise MissingOptionalDependency(
"metranet package needed for importing MeteoSwiss "
"radar composites but it is not installed"
)
ret = metranet.read_file(filename, physic_value=True, verbose=False)
precip = ret.data
geodata = _import_mch_geodata()
# read metranet
metadata = geodata
metadata["institution"] = "MeteoSwiss"
metadata["accutime"] = accutime
metadata["unit"] = unit
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(precip)
metadata["threshold"] = _get_threshold_value(precip)
metadata["zr_a"] = 316.0
metadata["zr_b"] = 1.5
return precip, None, metadata
def _import_mch_geodata():
"""Swiss radar domain CCS4
These are all hard-coded because the georeferencing is missing from the gif files.
"""
geodata = {}
# LV03 Swiss projection definition in Proj4
projdef = ""
projdef += "+proj=somerc "
projdef += " +lon_0=7.43958333333333"
projdef += " +lat_0=46.9524055555556"
projdef += " +k_0=1"
projdef += " +x_0=600000"
projdef += " +y_0=200000"
projdef += " +ellps=bessel"
projdef += " +towgs84=674.374,15.056,405.346,0,0,0,0"
projdef += " +units=m"
projdef += " +no_defs"
geodata["projection"] = projdef
geodata["x1"] = 255000.0
geodata["y1"] = -160000.0
geodata["x2"] = 965000.0
geodata["y2"] = 480000.0
geodata["xpixelsize"] = 1000.0
geodata["ypixelsize"] = 1000.0
geodata["cartesian_unit"] = "m"
geodata["yorigin"] = "upper"
return geodata
@postprocess_import()
def import_odim_hdf5(filename, qty="RATE", **kwargs):
"""Import a precipitation field (and optionally the quality field) from a
HDF5 file conforming to the ODIM specification.
**Important:** Currently, only the Pan-European (OPERA) and the
Dipartimento della Protezione Civile (DPC) radar composites are correctly supported.
Other ODIM-compliant files may not be read correctly.
Parameters
----------
filename: str
Name of the file to import.
qty: {'RATE', 'ACRR', 'DBZH'}
The quantity to read from the file. The currently supported identifiers
are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
is 'RATE'.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the OPERA product for the requested
quantity and the associated quality field and metadata. The quality
field is read from the file if it contains a dataset whose quantity
identifier is 'QIND'.
"""
if not H5PY_IMPORTED:
raise MissingOptionalDependency(
"h5py package is required to import "
"radar reflectivity composites using ODIM HDF5 specification "
"but it is not installed"
)
if qty not in ["ACRR", "DBZH", "RATE"]:
raise ValueError(
"unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
)
f = h5py.File(filename, "r")
precip = None
quality = None
for dsg in f.items():
if dsg[0].startswith("dataset"):
what_grp_found = False
# check if the "what" group is in the "dataset" group
if "what" in list(dsg[1].keys()):
if "quantity" in dsg[1]["what"].attrs.keys():
qty_, gain, offset, nodata, undetect = _read_opera_hdf5_what_group(
dsg[1]["what"]
)
what_grp_found = True
for dg in dsg[1].items():
if dg[0][0:4] == "data":
# check if the "what" group is in the "data" group
if "what" in list(dg[1].keys()):
(
qty_,
gain,
offset,
nodata,
undetect,
) = _read_opera_hdf5_what_group(dg[1]["what"])
elif not what_grp_found:
raise DataModelError(
"Non ODIM compliant file: "
"no what group found from {} "
"or its subgroups".format(dg[0])
)
if qty_.decode() in [qty, "QIND"]:
arr = dg[1]["data"][...]
mask_n = arr == nodata
mask_u = arr == undetect
mask = np.logical_and(~mask_u, ~mask_n)
if qty_.decode() == qty:
precip = np.empty(arr.shape)
precip[mask] = arr[mask] * gain + offset
if qty != "DBZH":
precip[mask_u] = offset
else:
precip[mask_u] = -30.0
precip[mask_n] = np.nan
elif qty_.decode() == "QIND":
quality = np.empty(arr.shape, dtype=float)
quality[mask] = arr[mask]
quality[~mask] = np.nan
if quality is None:
for dgg in dg[1].items():  # from here ----------------------------
if dgg[0][0:7] == "quality":
quality_keys = list(dgg[1].keys())
if "what" in quality_keys:
(
qty_,
gain,
offset,
nodata,
undetect,
) = _read_opera_hdf5_what_group(dgg[1]["what"])
if qty_.decode() == "QIND":
arr = dgg[1]["data"][...]
mask_n = arr == nodata
mask_u = arr == undetect
mask = np.logical_and(~mask_u, ~mask_n)
quality = np.empty(arr.shape, dtype=float)
quality[mask] = arr[mask] * gain + offset
quality[~mask] = np.nan  # to here -----------------------------
if precip is None:
raise IOError("requested quantity %s not found" % qty)
where = f["where"]
proj4str = where.attrs["projdef"].decode()
pr = pyproj.Proj(proj4str)
ll_lat = where.attrs["LL_lat"]
ll_lon = where.attrs["LL_lon"]
ur_lat = where.attrs["UR_lat"]
ur_lon = where.attrs["UR_lon"]
if (
"LR_lat" in where.attrs.keys()
and "LR_lon" in where.attrs.keys()
and "UL_lat" in where.attrs.keys()
and "UL_lon" in where.attrs.keys()
):
lr_lat = float(where.attrs["LR_lat"])
lr_lon = float(where.attrs["LR_lon"])
ul_lat = float(where.attrs["UL_lat"])
ul_lon = float(where.attrs["UL_lon"])
full_cornerpts = True
else:
full_cornerpts = False
ll_x, ll_y = pr(ll_lon, ll_lat)
ur_x, ur_y = pr(ur_lon, ur_lat)
if full_cornerpts:
lr_x, lr_y = pr(lr_lon, lr_lat)
ul_x, ul_y = pr(ul_lon, ul_lat)
x1 = min(ll_x, ul_x)
y1 = min(ll_y, lr_y)
x2 = max(lr_x, ur_x)
y2 = max(ul_y, ur_y)
else:
x1 = ll_x
y1 = ll_y
x2 = ur_x
y2 = ur_y
if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
xpixelsize = where.attrs["xscale"]
ypixelsize = where.attrs["yscale"]
else:
xpixelsize = None
ypixelsize = None
if qty == "ACRR":
unit = "mm"
transform = None
elif qty == "DBZH":
unit = "dBZ"
transform = "dB"
else:
unit = "mm/h"
transform = None
metadata = {
"projection": proj4str,
"ll_lon": ll_lon,
"ll_lat": ll_lat,
"ur_lon": ur_lon,
"ur_lat": ur_lat,
"x1": x1,
"y1": y1,
"x2": x2,
"y2": y2,
"xpixelsize": xpixelsize,
"ypixelsize": ypixelsize,
"cartesian_unit": "m",
"yorigin": "upper",
"institution": "Odyssey datacentre",
"accutime": 15.0,
"unit": unit,
"transform": transform,
"zerovalue": np.nanmin(precip),
"threshold": _get_threshold_value(precip),
}
f.close()
return precip, quality, metadata
def import_opera_hdf5(filename, qty="RATE", **kwargs):
"""
Wrapper to :py:func:`pysteps.io.importers.import_odim_hdf5`
to maintain backward compatibility with previous pysteps versions.
**Important:** Use :py:func:`~pysteps.io.importers.import_odim_hdf5` instead.
"""
return import_odim_hdf5(filename, qty=qty, **kwargs)
def _read_opera_hdf5_what_group(whatgrp):
qty = whatgrp.attrs["quantity"]
gain = whatgrp.attrs["gain"] if "gain" in whatgrp.attrs.keys() else 1.0
offset = whatgrp.attrs["offset"] if "offset" in whatgrp.attrs.keys() else 0.0
nodata = whatgrp.attrs["nodata"] if "nodata" in whatgrp.attrs.keys() else np.nan
undetect = whatgrp.attrs["undetect"] if "undetect" in whatgrp.attrs.keys() else 0.0
return qty, gain, offset, nodata, undetect
@postprocess_import()
def import_saf_crri(filename, extent=None, **kwargs):
"""Import a NetCDF radar rainfall product from the Convective Rainfall Rate
Intensity (CRRI) product from the Satellite Application Facilities (SAF).
Product description available on http://www.nwcsaf.org/crr_description
(last visited Jan 26, 2020).
Parameters
----------
filename: str
Name of the file to import.
extent: scalars (left, right, bottom, top), optional
The spatial extent specified in data coordinates.
If None, the full extent is imported.
{extra_kwargs_doc}
Returns
-------
out: tuple
A three-element tuple containing the rainfall field in mm/h, the quality
field and the metadata imported from the CRRI SAF netcdf file.
The quality field includes values [1, 2, 4, 8, 16, 24, 32] meaning
"nodata", "internal_consistency", "temporal_consistency", "good",
"questionable", "bad", and "interpolated", respectively.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required to import CRRI SAF products "
"but it is not installed"
)
geodata = _import_saf_crri_geodata(filename)
metadata = geodata
if extent:
xcoord = (
np.arange(metadata["x1"], metadata["x2"], metadata["xpixelsize"])
+ metadata["xpixelsize"] / 2
)
ycoord = (
np.arange(metadata["y1"], metadata["y2"], metadata["ypixelsize"])
+ metadata["ypixelsize"] / 2
)
ycoord = ycoord[::-1] # yorigin = "upper"
idx_x = np.logical_and(xcoord < extent[1], xcoord > extent[0])
idx_y = np.logical_and(ycoord < extent[3], ycoord > extent[2])
# update geodata
metadata["x1"] = xcoord[idx_x].min() - metadata["xpixelsize"] / 2
metadata["x2"] = xcoord[idx_x].max() + metadata["xpixelsize"] / 2
metadata["y1"] = ycoord[idx_y].min() - metadata["ypixelsize"] / 2
metadata["y2"] = ycoord[idx_y].max() + metadata["ypixelsize"] / 2
else:
idx_x = None
idx_y = None
precip, quality = _import_saf_crri_data(filename, idx_x, idx_y)
metadata["transform"] = None
metadata["zerovalue"] = np.nanmin(precip)
metadata["threshold"] = _get_threshold_value(precip)
return precip, quality, metadata
def _import_saf_crri_data(filename, idx_x=None, idx_y=None):
ds_rainfall = netCDF4.Dataset(filename)
if "crr_intensity" in ds_rainfall.variables.keys():
if idx_x is not None:
data = np.array(ds_rainfall.variables["crr_intensity"][idx_y, idx_x])
quality = np.array(ds_rainfall.variables["crr_quality"][idx_y, idx_x])
else:
data = np.array(ds_rainfall.variables["crr_intensity"])
quality = np.array(ds_rainfall.variables["crr_quality"])
precipitation = np.where(data == 65535, np.nan, data)
else:
precipitation = None
quality = None
ds_rainfall.close()
return precipitation, quality
def _import_saf_crri_geodata(filename):
geodata = {}
ds_rainfall = netCDF4.Dataset(filename)
# get projection
projdef = ds_rainfall.getncattr("gdal_projection")
geodata["projection"] = projdef
# get x1, y1, x2, y2, xpixelsize, ypixelsize, yorigin
geotable = ds_rainfall.getncattr("gdal_geotransform_table")
xmin = ds_rainfall.getncattr("gdal_xgeo_up_left")
xmax = ds_rainfall.getncattr("gdal_xgeo_low_right")
ymin = ds_rainfall.getncattr("gdal_ygeo_low_right")
ymax = ds_rainfall.getncattr("gdal_ygeo_up_left")
xpixelsize = abs(geotable[1])
ypixelsize = abs(geotable[5])
geodata["x1"] = xmin
geodata["y1"] = ymin
geodata["x2"] = xmax
geodata["y2"] = ymax
geodata["xpixelsize"] = xpixelsize
geodata["ypixelsize"] = ypixelsize
geodata["cartesian_unit"] = "m"
geodata["yorigin"] = "upper"
# get the accumulation period
geodata["accutime"] = None
# get the unit of precipitation
geodata["unit"] = ds_rainfall.variables["crr_intensity"].units
# get institution
geodata["institution"] = ds_rainfall.getncattr("institution")
ds_rainfall.close()
return geodata
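# Editor-added sketch (not part of the original module): a possible call of the
# CRRI SAF importer above using the `extent` keyword. The file name and the
# extent values are hypothetical; netCDF4 must be installed.
def _example_saf_crri_usage():  # illustrative only, never called
    precip, quality, metadata = import_saf_crri(
        "S_NWC_CRR_MSG4_Europe-VISIR_20230101T120000Z.nc",  # hypothetical name
        extent=(2500000.0, 3500000.0, -1000000.0, 0.0),  # (left, right, bottom, top)
    )
    return precip.shape, metadata["x1"], metadata["y2"]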
|
bsd-3-clause
|
dkushner/zipline
|
tests/test_algorithm_gen.py
|
18
|
7339
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
def __init__(self, spread):
super(RecordDateSlippage, self).__init__(spread=spread)
self.latest_date = None
def simulate(self, event, open_orders):
self.latest_date = event.dt
result = super(RecordDateSlippage, self).simulate(event, open_orders)
return result
class TestAlgo(TradingAlgorithm):
def __init__(self, asserter, *args, **kwargs):
super(TestAlgo, self).__init__(*args, **kwargs)
self.asserter = asserter
def initialize(self, window_length=100):
self.latest_date = None
self.set_slippage(RecordDateSlippage(spread=0.05))
self.stocks = [self.sid(8229)]
self.ordered = False
self.num_bars = 0
def handle_data(self, data):
self.num_bars += 1
self.latest_date = self.get_datetime()
if not self.ordered:
for stock in self.stocks:
self.order(stock, 100)
self.ordered = True
else:
self.asserter.assertGreaterEqual(
self.latest_date,
self.slippage.latest_date
)
class AlgorithmGeneratorTestCase(TestCase):
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@nottest
def test_lse_algorithm(self):
lse = trading.TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
with lse:
sim_params = factory.create_simulation_parameters(
start=datetime(2012, 5, 1, tzinfo=pytz.utc),
end=datetime(2012, 6, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
200,
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(len(results), 42)
# May 7, 2012 was an LSE holiday, confirm the 4th trading
# day was May 8.
self.assertEqual(results[4]['daily_perf']['period_open'],
datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
@timed(DEFAULT_TIMEOUT)
def test_generator_dates(self):
"""
Ensure the pipeline of generators is in sync, at least as far as
their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2011, 7, 30, tzinfo=pytz.utc),
end=datetime(2012, 7, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
self.assertTrue(list(gen))
self.assertTrue(algo.slippage.latest_date)
self.assertTrue(algo.latest_date)
@timed(DEFAULT_TIMEOUT)
def test_handle_data_on_market(self):
"""
Ensure that handle_data is only called on market minutes.
i.e. events that come in at midnight should be processed at market
open.
"""
from zipline.finance.trading import SimulationParameters
sim_params = SimulationParameters(
period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
data_frequency='minute'
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
midnight_custom_source = [Event({
'custom_field': 42.0,
'sid': 'custom_data',
'source_id': 'TestMidnightSource',
'dt': pd.Timestamp('2012-07-30', tz='UTC'),
'type': DATASOURCE_TYPE.CUSTOM
})]
minute_event_source = [Event({
'volume': 100,
'price': 200.0,
'high': 210.0,
'open_price': 190.0,
'low': 180.0,
'sid': 8229,
'source_id': 'TestMinuteEventSource',
'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').tz_convert('UTC'),
'type': DATASOURCE_TYPE.TRADE
})]
algo.set_sources([midnight_custom_source, minute_event_source])
gen = algo.get_generator()
# Consume the generator
list(gen)
# Though the events had different time stamps, handle data should
# have only been called once, at the market open.
self.assertEqual(algo.num_bars, 1)
@timed(DEFAULT_TIMEOUT)
def test_progress(self):
"""
Ensure the progress metric reported in the results reaches 1.0 by the
end of the simulation.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2008, 1, 1, tzinfo=pytz.utc),
end=datetime(2008, 1, 5, tzinfo=pytz.utc)
)
algo = TestAlgo(self, sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(results[-2]['progress'], 1.0)
def test_benchmark_times_match_market_close_for_minutely_data(self):
"""
Benchmark dates should be adjusted so that benchmark events are
emitted at the end of each trading day when working with minutely
data.
Verification relies on the fact that there are no trades so
algo.datetime should be equal to the last benchmark time.
See https://github.com/quantopian/zipline/issues/241
"""
sim_params = create_simulation_parameters(num_days=1,
data_frequency='minute')
algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
algo.run(source=[], overwrite_sim_params=False)
self.assertEqual(algo.datetime, sim_params.last_close)
|
apache-2.0
|
nesterione/scikit-learn
|
sklearn/tests/test_qda.py
|
155
|
3481
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Ensure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
|
bsd-3-clause
|
Fireblend/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
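As an illustrative check of this bound, taking ``n_samples = 500`` and
``eps = 0.5`` gives ``n_components >= 4 log(500) / (0.125 - 0.0417)``,
i.e. roughly 300 dimensions.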
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixel data for 500
handwritten digit pictures are randomly projected to spaces of various
larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide,
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
enstrophy/2dLBM
|
plot2dLDC.py
|
1
|
1266
|
#!/usr/bin/env python2
import matplotlib.pyplot as plt
import numpy as np
import math as math
data = np.fromfile('lidCavityResult.dat', dtype='double')
x, y = data[::3], data[1::3]
umag = data[2::3]
size = math.sqrt(len(umag))
umag = np.reshape(umag, (size, size))
x = np.reshape(x, (size, size))
y = np.reshape(y, (size, size))
plt.figure()
levels = np.linspace(0, 1, 11)
im = plt.contourf(x, y, umag, levels=levels)
#plt.contour(x, y, umag)
t = plt.title("Contour of Velocity Magnitude for Lid Driven Cavity at Re = 1000 MRT")
t.set_y(1.015)
plt.colorbar(im)
#plt.show()
plt.savefig('ldcFilledContourMRT.eps')
plt.close()
#Plot vertical velocity
d1 = np.loadtxt('verticalGhia.dau')
y1 = (d1[:,0]-1)/128
u1 = d1[:,1]
d2 = np.loadtxt('ldcVerticalCenterline.dat')
y2 = (d2[:,0])
u2 = d2[:,1]
plt.figure()
plt.plot(y1, u1, 'x', label='Ghia et al.')
plt.plot(y2, u2, label='LBM')
plt.legend()
plt.savefig('ldcVerticalComparisonMRT.eps')
plt.close()
#Plot horizontal velocity
d1 = np.loadtxt('horizGhia.dau')
y1 = (d1[:,0]-1)/128
u1 = d1[:,1]
d2 = np.loadtxt('ldcHorizontalCenterline.dat')
y2 = (d2[:,0])
u2 = d2[:,1]
plt.figure()
plt.plot(y1, u1, 'x', label='Ghia et al.')
plt.plot(y2, u2, label='LBM')
plt.savefig('ldcHorizComparisonMRT.eps')
plt.close()
|
gpl-2.0
|
jorik041/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
31
|
7929
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probalities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
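# For example (illustrative): with weights=[2, 1, 1] and per-sample hard
# predictions [0, 1, 1], np.bincount yields [2.0, 2.0] and np.argmax breaks
# the tie in favour of the lower index, so class 0 is selected.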
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = self.named_estimators.copy()
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
|
bsd-3-clause
|
MikeDacre/mike_tools
|
bin/normalize_effects.py
|
1
|
6812
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Normalize effect sizes for ATACseq
Created: 2016-47-26 13:09
Last modified: 2016-09-26 17:45
"""
import os
import sys
import argparse
from time import sleep
from multiprocessing import Pool
import numpy as np
import pandas as pd
import cluster
###############################################################################
# Normalization functions #
###############################################################################
def norm_both(df):
"""Normalize an effect size.
Intended to use with a pandas data frame.
    Algorithm: if pre > post: (pre-post)/pre; else: (pre-post)/(1-pre)
:df: Pandas dataframe with the columns 'pre' and 'post'
:returns: Pandas dataframe with an additional 'norm_effect' column
"""
df['norm_effect'] = \
((df.pre > df.post) * (df.pre - df.post)/df.pre) + \
((df.pre <= df.post) * (df.pre - df.post)/(1 - df.pre))
return df
def norm_max(df):
"""Normalize an effect size.
Intended to use with a pandas data frame.
Algorithm: pre-post/max(pre, 1-pre)
:df: Pandas dataframe with the columns 'pre' and 'post'
:returns: Pandas dataframe with an additional 'norm_effect' column
"""
df['norm_effect'] = (df.pre - df.post)/df.pre.clip(lower=1-df.pre)
return df
def norm_min(df):
"""Normalize an effect size.
Intended to use with a pandas data frame.
Algorithm: pre-post/min(pre, 1-pre)
:df: Pandas dataframe with the columns 'pre' and 'post'
:returns: Pandas dataframe with an additional 'norm_effect' column
"""
df['norm_effect'] = (df.pre - df.post)/df.pre.clip(upper=1-df.pre)
return df
def stderr(df):
"""Calculate the stderr and variance on a dataframe.
First calculate a variance as sqrt(prevar + postvar)
Then calculate a z-score as pre-post/var
Next, calculate a stddev as abs(norm_effect/z)
Finally, create a beta variance as sqrt(stddev)
:df: Pandas dataframe with the columns 'pre' 'post' 'prevar' 'postvar'
'norm_effect'
:returns: Same dataframe with additional columns: 'z' 'vari' 'stddev'
'bvari' 'preminuspost'
"""
df['vari'] = np.sqrt(df.prevar + df.postvar)
df['z'] = (df.pre-df.post)/df.vari
df['stddev'] = (df.norm_effect/df.z).abs()
df['bvari'] = np.sqrt(df.stddev)
return df
def rename_snp(x, dct):
"""For use with Series.apply."""
try:
return dct[x]
except KeyError:
return np.nan
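# Editor's illustration (not from the original script): a tiny, made-up
# DataFrame exercising the normalization helpers above; the numbers are
# hypothetical and only show which columns are expected and which get added.
#
# >>> import pandas as pd
# >>> demo = pd.DataFrame({'pre': [0.8, 0.3], 'post': [0.4, 0.6],
# ...                      'prevar': [0.01, 0.02], 'postvar': [0.01, 0.02]})
# >>> stderr(norm_both(demo))[['norm_effect', 'z', 'vari', 'stddev', 'bvari']]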
###############################################################################
# Main functions #
###############################################################################
def normalize_files(norm_by, prefix, rename_file, files):
"""Submit a normalization job for every file to the cluster.
:norm_by: max, min, or both
    :prefix: Prefix to prepend to the output file names
:rename_file: A file with CHR.SNP\\tNew Name
:files: A list of files to submit
"""
pool = Pool(16)
jobs = []
for fl in files:
jobs.append(pool.apply_async(normalize_file,
(norm_by, prefix, rename_file, fl)))
for job in jobs:
job.get()
def normalize_file(norm_by, prefix, rename_file, infile):
"""Normalize the effect sizes by either max, min, or both
:norm_by: max, min, or both
    :prefix: Prefix to prepend to the output file names
:rename_file: A file with CHR.SNP\\tNew Name
:infile: A tab delimited file with the following columns::
['chr', 'position', 'ref', 'depth', 'post',
'pre', 'pval', 'prevar', 'postvar']
Columns must be in exactly that order, column names are ignored.
MAF is limited to between 0.02 and 0.98.
Output is chr.name\\tpop\\teffect\\tstderr
chr.name is renamed by the index in rename_file.
"""
# Read dataframe
df = pd.read_csv(infile, sep='\t')
# Rename columns
df.columns = ['chrom', 'position', 'ref', 'depth', 'post',
'pre', 'pval', 'prevar', 'postvar']
df.sort_values(['chrom', 'position'], inplace=True)
# Filter MAF
df = df[df.pre < 0.98]
df = df[df.pre > 0.02]
# Get population from name
path, name = os.path.split(infile)
pop = name.split('.')[0]
df['pop'] = pop
# Create name
df['snp'] = df.chrom + '.' + df.position.apply(str)
# Get renamed name
with open(rename_file) as fin:
rename_dict = {}
for line in fin:
orig, new = line.rstrip().split('\t')
rename_dict[orig.strip()] = new.strip()
df['name'] = df.snp.apply(rename_snp, args=(rename_dict,))
del(rename_dict)
# Normalize effect
if norm_by == 'max':
norm_func = norm_max
elif norm_by == 'min':
norm_func = norm_min
elif norm_by == 'both':
norm_func = norm_both
else:
raise Exception("Norm by value not recognized {}".format(norm_by))
df = norm_func(df)
# Calculate z-score and errors
df = stderr(df)
# Reorganize columns
df = df[['chrom', 'position', 'snp', 'name', 'pop', 'ref', 'depth', 'pre',
'post', 'prevar', 'postvar', 'z', 'vari', 'pval', 'norm_effect',
'stddev', 'bvari']]
# Print output
g = df[df.stddev.notnull()]
beta_df = g[['name', 'pop', 'norm_effect', 'bvari']]
beta_df = beta_df[beta_df.name.notnull()]
df.to_pickle(os.path.join(path, prefix + '.' + name + '.pandas'))
beta_df.to_csv(os.path.join(path, prefix + '.' + name + '.betas.txt'),
index=False, sep='\t')
def main(argv=None):
"""Parse command line args. """
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Positional arguments
parser.add_argument('rename_file',
help="File to use to name by peak. (SNP\\tname)")
parser.add_argument('files', nargs='+',
help="Input files from Ashlye's pipleine")
# Optional flags
parser.add_argument('-n', '--normalize-by', choices={'max', 'min', 'both'},
default='both', help="Which factor to normalize by")
parser.add_argument('-p', '--prefix', default='normalized',
help="Prefix to use on output files")
args = parser.parse_args(argv)
normalize_files(args.normalize_by, args.prefix, args.rename_file,
args.files)
if __name__ == '__main__' and '__file__' in globals():
sys.exit(main())
|
unlicense
|
neherlab/ffpopsim
|
examples/genealogies_with_selection.py
|
2
|
2468
|
import FFPopSim as h
import numpy as np
from matplotlib import pyplot as plt
import random as rd
from Bio import Phylo
print "This script is meant to illustrate and explore the effect of\n\
positive selection on genealogies in asexual and sexual populations. \n\n\
Simulations are performed using an infinite sites model with L segregating\n\
sites at which mutations with identical beneficial effect are injected.\n\n"
#suggested values
#neutral asexual: N=100 s=0.00001 r=0.0
#selected asexual: N=10000 s=0.01 r=0.0
#selected sexual: N=1000 s=0.01 r=1.0
L = 1000 #number of segregating sites
s = 1e-2 #single site effect
N = 10000 #population size
r = 0.0 #outcrossing rate
sample_size=30 #number of individuals whose genealogy is looked at
nsamples = 3 #number of trees
burnin = 2000 #either ~5*N or 5/s, depending on whether coalescence is dominated by drift or draft
dt = 1000 #time between samples
#set up population, switch on infinite sites mode
pop=h.haploid_highd(L, all_polymorphic=True)
#set the population size via the carrying capacity
pop.carrying_capacity= N
#set the crossover rate, outcrossing_rate and recombination model
pop.outcrossing_rate = r
pop.recombination_model = h.CROSSOVERS
pop.crossover_rate = 1.0/pop.L
#set the effect sizes of the mutations that are injected (the same at each site in this case)
pop.set_fitness_additive(np.ones(L)*s)
#track the genealogy at a central locus L/2 (which one doesn't matter in the asexual case)
pop.track_locus_genealogy([L/2])
#initialize the populations
pop.set_wildtype(pop.carrying_capacity)
print "Population parameters:"
pop.status()
#burn in
print "\nEquilibrate:"
while pop.generation<burnin:
print "Burn in: at", pop.generation, "out of", burnin, "generations"
pop.evolve(100)
print "\nPlot coalescent trees:"
fig=plt.figure(figsize=(7,10))
fig.suptitle("".join(map(str,['N=',N,' r=',r,' L=',L, ' s=',s])), fontsize=18)
for si in xrange(nsamples):
print "sample",si,"out of",nsamples
#evolve a while before sampling the next tree
pop.evolve(dt)
#draw a sample from the population, convert its genealogy to a BioPython tree object and plot
tree = pop.genealogy.get_tree(L/2)
subtree = tree.create_subtree_from_keys(rd.sample(tree.leafs,sample_size)).to_Biopython_tree()
subtree.ladderize()
plt.subplot(3,1,si+1)
Phylo.draw(subtree,label_func=lambda x:"")
plt.draw()
plt.savefig("".join(map(str,['tree_', 'N=',N,'_r=',r,'_L=',L, '_s=',s,'.pdf'])))
|
gpl-3.0
|
cjgunase/ThinkStats2
|
code/scatter.py
|
69
|
4281
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
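# Editor's aside (added for illustration, with made-up heights): the
# digitize/groupby binning pattern used above, shown in isolation.
#
# >>> values = np.array([150, 163, 171, 188])
# >>> bins = np.arange(135, 210, 5)
# >>> np.digitize(values, bins)  # index of the bin each height falls into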
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
    return  # NOTE: early return; the figure-generating calls below are skipped
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
|
gpl-3.0
|
zuku1985/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
36
|
5023
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
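# Editor's aside (not in the original example): the percentile-based
# thresholding used above, shown on a handful of made-up scores. With a
# known 25% outlier fraction, samples scoring below the 25th percentile of
# the decision function are reported as outliers.
#
# >>> demo_scores = np.array([-0.5, -0.3, 0.1, 0.2])
# >>> stats.scoreatpercentile(demo_scores, 100 * 0.25)  # decision threshold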
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/tslibs/test_array_to_datetime.py
|
1
|
6091
|
from datetime import date, datetime
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, tslib
from pandas.compat.numpy import np_array_datetime64_compat
from pandas import Timestamp
import pandas._testing as tm
@pytest.mark.parametrize(
"data,expected",
[
(
["01-01-2013", "01-02-2013"],
[
"2013-01-01T00:00:00.000000000-0000",
"2013-01-02T00:00:00.000000000-0000",
],
),
(
["Mon Sep 16 2013", "Tue Sep 17 2013"],
[
"2013-09-16T00:00:00.000000000-0000",
"2013-09-17T00:00:00.000000000-0000",
],
),
],
)
def test_parsing_valid_dates(data, expected):
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dt_string, expected_tz",
[
["01-01-2013 08:00:00+08:00", 480],
["2013-01-01T08:00:00.000000000+0800", 480],
["2012-12-31T16:00:00.000000000-0800", -480],
["12-31-2012 23:00:00-01:00", -60],
],
)
def test_parsing_timezone_offsets(dt_string, expected_tz):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added.
arr = np.array(["01-01-2013 00:00:00"], dtype=object)
expected, _ = tslib.array_to_datetime(arr)
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(expected_tz)
def test_parsing_non_iso_timezone_offset():
dt_string = "01-01-2013T00:00:00.000000000+0000"
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")])
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(0)
def test_parsing_different_timezone_offsets():
# see gh-17697
data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"]
data = np.array(data, dtype=object)
result, result_tz = tslib.array_to_datetime(data)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 23400)),
],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is None
@pytest.mark.parametrize(
"data", [["-352.737091", "183.575577"], ["1", "2", "3", "4", "5"]]
)
def test_number_looking_strings_not_into_datetime(data):
# see gh-4601
#
# These strings don't look like datetimes, so
    # no conversion should be attempted.
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="ignore")
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize(
"invalid_date",
[
date(1000, 1, 1),
datetime(1000, 1, 1),
"1000-01-01",
"Jan 1, 1000",
np.datetime64("1000-01-01"),
],
)
@pytest.mark.parametrize("errors", ["coerce", "raise"])
def test_coerce_outside_ns_bounds(invalid_date, errors):
arr = np.array([invalid_date], dtype="object")
kwargs = {"values": arr, "errors": errors}
if errors == "raise":
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
tslib.array_to_datetime(**kwargs)
else: # coerce.
result, _ = tslib.array_to_datetime(**kwargs)
expected = np.array([iNaT], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_coerce_outside_ns_bounds_one_valid():
arr = np.array(["1/1/1000", "1/1/2000"], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = [iNaT, "2000-01-01T00:00:00.000000000-0000"]
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("errors", ["ignore", "coerce"])
def test_coerce_of_invalid_datetimes(errors):
arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object)
kwargs = {"values": arr, "errors": errors}
if errors == "ignore":
# Without coercing, the presence of any invalid
# dates prevents any values from being converted.
result, _ = tslib.array_to_datetime(**kwargs)
tm.assert_numpy_array_equal(result, arr)
else: # coerce.
        # With coercing, the invalid dates become iNaT
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = ["2013-01-01T00:00:00.000000000-0000", iNaT, iNaT]
tm.assert_numpy_array_equal(
result, np_array_datetime64_compat(expected, dtype="M8[ns]")
)
def test_to_datetime_barely_out_of_bounds():
# see gh-19382, gh-19529
#
# Close enough to bounds that dropping nanos
# would result in an in-bounds datetime.
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
tslib.array_to_datetime(arr)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"data,expected",
[
([SubDatetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
([datetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
([Timestamp(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
],
)
def test_datetime_subclass(data, expected):
# GH 25851
# ensure that subclassed datetime works with
# array_to_datetime
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
|
bsd-3-clause
|
breznak/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/blocking_input.py
|
69
|
12119
|
"""
This provides several classes used for blocking interaction with figure windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import time
import numpy as np
from matplotlib import path, verbose
from matplotlib.cbook import is_sequence_of_strings
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks=[]
def add_event(self,event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self,index=-1):
"""
This removes an event from the event list. Defaults to
removing last event, but an index can be supplied. Note that
this does not check that there are events, much like the
        normal pop method. If no events exist, this will raise an
exception.
"""
self.events.pop(index)
def pop(self,index=-1):
self.pop_event(index)
pop.__doc__=pop_event.__doc__
def __call__(self, n=1, timeout=30 ):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append( self.fig.canvas.mpl_connect(n, self.on_event) )
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event') )
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events)>0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == 3:
self.button3(event)
elif button == 2:
self.button2(event)
else:
self.button1(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
key = event.key
if key == 'backspace' or key == 'delete':
self.button3(event)
elif key == 'enter':
self.button2(event)
else:
self.button1(event)
def button1( self, event ):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self)
def button2( self, event ):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self)
# This will exit even if not in infinite mode. This is
# consistent with matlab and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def button3( self, event ):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self)
# Now remove any existing clicks if possible
if len(self.events)>0:
self.pop()
def add_click(self,event):
"""
        This adds the coordinates of an event to the list of clicks.
"""
self.clicks.append((event.xdata,event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks),event.xdata, event.ydata))
# If desired plot up click
if self.show_clicks:
self.marks.extend(
event.inaxes.plot([event.xdata,], [event.ydata,], 'r+') )
self.fig.canvas.draw()
def pop_click(self,index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
def pop(self,index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(index)
BlockingInput.pop(self,index)
def cleanup(self):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self,n=n,timeout=timeout)
return self.clicks
class BlockingContourLabeler( BlockingMouseInput ):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self,cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure )
def button1(self,event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
cs = self.cs
if event.inaxes == cs.ax:
conmin,segmin,imin,xmin,ymin = cs.find_nearest_contour(
event.x, event.y, cs.labelIndiceList)[:5]
# Get index of nearest level in subset of levels used for labeling
lmin = cs.labelIndiceList.index(conmin)
# Coordinates of contour
paths = cs.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = cs.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = cs.get_label_width(cs.labelLevelList[lmin],
cs.labelFmt, cs.labelFontSizeList[lmin])
"""
# requires python 2.5
# Figure out label rotation.
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lc if self.inline else [],
self.inline_spacing )
"""
# Figure out label rotation.
if self.inline: lcarg = lc
else: lcarg = None
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
self.inline_spacing )
cs.add_label(xmin,ymin,rotation,cs.labelLevelList[lmin],
cs.labelCValueList[lmin])
if self.inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n)>1:
paths.append( path.Path(n) )
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self,event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
# Remove this last event - not too important for clabel use
# since clabel normally doesn't have a maximum number of
# events, but best for cleanliness sake.
BlockingInput.pop(self)
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self,inline,inline_spacing=5,n=-1,timeout=-1):
self.inline=inline
self.inline_spacing=inline_spacing
BlockingMouseInput.__call__(self,n=n,timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=('button_press_event','key_press_event') )
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events)>0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self,n=1,timeout=timeout)
return self.keyormouse
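# Editor's usage sketch (requires an interactive matplotlib backend; this is
# roughly what pyplot.ginput and waitforbuttonpress do with these classes):
#
# import matplotlib.pyplot as plt
# fig = plt.figure()
# clicks = BlockingMouseInput(fig)(n=3, timeout=30)   # list of (x, y) tuples
# was_key = BlockingKeyMouseInput(fig)(timeout=30)    # True=key, False=mouse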
|
agpl-3.0
|
phac-nml/ecoli_serotyping
|
test/test_ectyper_integration.py
|
1
|
6826
|
import sys
import pytest
import tempfile
import os
from ectyper import ectyper
import subprocess
import pandas
import logging
import re
TEST_ROOT = os.path.dirname(__file__)
logging.basicConfig(level=logging.INFO)
LOG=logging.getLogger("TEST")
def set_input(input,
percent_iden=None,
verify = True,
output=tempfile.mkdtemp(),
cores=1,
print_sequence=False):
"""
Create the sys.argv[] without need for commandline input.
:param input: Input file given by testing function
:param percent_iden: Percent identity for comparison
:param output: Location of output
:return: None
"""
args = ['-i', input,
#'-r', os.path.join(TEST_ROOT, 'Data/test_sketch.msh'),
'-c', str(cores)]
if percent_iden:
args += ['-d', str(percent_iden)]
if verify:
args += ['--verify']
if output:
args += ['-o', output]
if print_sequence:
args += ['--sequence']
sys.argv[1:] = args
def test_integration_invalid_file(caplog):
"""
Giving a non-fasta file in fasta-file name.
:return: None
"""
caplog.set_level(logging.DEBUG)
file = os.path.join(TEST_ROOT, 'Data/test_dir/badfasta.fasta')
set_input(input=file)
ectyper.run_program()
assert "Non fasta / fastq file" in caplog.text
def test_integration_no_file():
"""
Giving no input to the program.
:return: None
"""
file = ''
set_input(file)
with pytest.raises(FileNotFoundError) as se:
ectyper.run_program()
assert se.type == FileNotFoundError
assert str(se.value) == "No files were found to run on"
def test_integration_valid_file(caplog):
"""
Ensure a valid E. coli fasta passes
:return: None
"""
file = os.path.join(TEST_ROOT, 'Data/Escherichia.fna')
set_input(file)
ectyper.run_program()
print(caplog.text)
assert "PASS (REPORTABLE)" in caplog.text
assert "O103:H2" in caplog.text
assert "Escherichia coli" in caplog.text
def test_integration_yersinia(caplog):
"""
Ensure a non-E. coli gets categorized as such
:return: None
"""
file = os.path.join(TEST_ROOT, 'Data/Yersinia.fasta')
set_input(file)
ectyper.run_program()
assert "Yersinia pestis" in caplog.text
assert "WARNING (WRONG SPECIES)" in caplog.text
def test_integration_validfasta_noverify(caplog):
"""
    Test that fasta files run without the E. coli species verification option (--verify) do not fail, as per issue #76 (https://github.com/phac-nml/ecoli_serotyping/issues/76)
:return: None
"""
file = os.path.join(TEST_ROOT, 'Data/Escherichia.fna')
set_input(file, verify=False)
ectyper.run_program()
assert "O103\tH2\tO103:H2" in caplog.text
assert "Escherichia\t-\tO103\tH2" in caplog.text
def test_valid_fastq_file(caplog):
"""
Given a valid fastq file, get the correct results.
Use a temp dir for the test output
:return: None
"""
file = os.path.join(TEST_ROOT, 'Data/Escherichia.fastq')
set_input(file, verify=False)
ectyper.run_program()
assert "O22:H8" in caplog.text
def test_valid_fastq_file_with_verify(caplog):
"""
Given a valid fastq file with low genome coverage, test species verification fail
Use a temp dir for the test output
:return: None
"""
file = os.path.join(TEST_ROOT, 'Data/Escherichia.fastq')
set_input(file, verify=True)
ectyper.run_program()
assert "Escherichia coli" in caplog.text
def test_multiple_directories(caplog):
"""
Check a number of small files, some good, some bad,
within a nested directory structure.
:param caplog: Capture logging output for pytest
:return: None
"""
the_dir = os.path.join(TEST_ROOT, 'Data/test_dir')
set_input(the_dir, cores=4, verify=True, print_sequence=True)
ectyper.run_program()
assert any([True if re.match(r".+sample2.+WARNING\s+\(WRONG\s+SPECIES\).+Sample identified as -", line) else False for line in caplog.text.splitlines()]) #O148:H44
assert any([True if re.match(r".+sample3.+WARNING\s+\(WRONG\s+SPECIES\).+Sample identified as -", line) else False for line in caplog.text.splitlines()]) #O148:H44
assert any([True if re.match(r".+sample4.+WARNING\s+\(WRONG\s+SPECIES\).+Sample identified as -", line) else False for line in caplog.text.splitlines()]) #O148:H44
assert any([True if re.match(r".+badfasta.+WARNING\s+\(WRONG\s+SPECIES\).+Non fasta / fastq file", line) else False for line in caplog.text.splitlines()])
assert any([True if re.match(r".+sample.fasta.+WARNING\s+\(WRONG\s+SPECIES\).+Non fasta / fastq file", line) else False for line in caplog.text.splitlines()])
assert any([True if re.match(r".+sampletar.+WARNING\s+\(WRONG\s+SPECIES\).+Non fasta / fastq file", line) else False for line in caplog.text.splitlines()])
assert any([True if re.match(r".+test_junk.+WARNING\s+\(WRONG\s+SPECIES\).+Non fasta / fastq file", line) else False for line in caplog.text.splitlines()])
def test_mash_sketch_and_assembly_metadata():
"""
Test if all accessions in mash sketch are a complete subset of the assembly stats superset.
Ensure that all accession numbers are represented in the meta data assembly stats
"""
ectyper.speciesIdentification.get_refseq_mash_and_assembly_summary()
ROOT_DIR = os.path.abspath(os.path.join(TEST_ROOT, '..'))
MASHSTATSMETAFILE=os.path.join(TEST_ROOT+"/mash_refseq_meta.txt")
MASHINFILE = os.path.join(ROOT_DIR, 'ectyper/Data/refseq.genomes.k21s1000.msh')
ASSEMBLYREFSEQMETAFILE = os.path.join(ROOT_DIR, 'ectyper/Data/assembly_summary_refseq.txt')
cmd = ["mash info -t " + MASHINFILE + " > " + MASHSTATSMETAFILE]
print("File written to {}".format(MASHSTATSMETAFILE))
subprocess.run(cmd, shell=True)
mashsketchdatadf = pandas.read_csv(MASHSTATSMETAFILE,sep="\t")
mashaccessions=[re.findall(r"(GCF_\d+)\.+",item)[0] for item in mashsketchdatadf.iloc[:,2].values.tolist()]
LOG.info("Extracted {} MASH RefSeq accessions".format(len(mashaccessions)))
genomeassemblystatrefseqsdf = pandas.read_csv(ASSEMBLYREFSEQMETAFILE, sep="\t", skiprows=1)
metaaccessionsrefseq = [re.findall(r"(GCF_\d+)\.+",item)[0] for item in genomeassemblystatrefseqsdf.iloc[:, 0].values.tolist()]
metaaccessionsrefseqdict = dict.fromkeys(metaaccessionsrefseq, True)
LOG.info("Extracted {} assembly metadata accessions from {}".format(len(metaaccessionsrefseq),MASHSTATSMETAFILE))
notfoundaccessions = []
for accession in mashaccessions:
if not metaaccessionsrefseqdict.get(accession):
notfoundaccessions.append(accession)
LOG.info("{} of accessions not found in metadata file {}".format(len(notfoundaccessions),MASHSTATSMETAFILE))
|
apache-2.0
|
admcrae/tensorflow
|
tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py
|
116
|
5164
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
heli522/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
142
|
6276
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
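# Editor's note: a minimal BernoulliRBM usage sketch, separate from the tests
# above; the hyperparameters are illustrative only.
#
# >>> rbm = BernoulliRBM(n_components=16, learning_rate=0.05, n_iter=10,
# ...                    random_state=0)
# >>> hidden = rbm.fit_transform(Xdigits)  # shape: (n_samples, 16)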
|
bsd-3-clause
|
cg31/tensorflow
|
tensorflow/examples/learn/iris_custom_model.py
|
12
|
2592
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(features, layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Create two tensors respectively for prediction and loss.
prediction, loss = (
tf.contrib.learn.models.logistic_regression(features, target)
)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
courtarro/gnuradio
|
gr-dtv/examples/atsc_ctrlport_monitor.py
|
21
|
6089
|
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from gnuradio.ctrlport.GNURadioControlPortClient import GNURadioControlPortClient
import scipy
from scipy import fftpack
"""
If a host is running the ATSC receiver chain with ControlPort
turned on, this script will connect to the host using the hostname and
port pair of the ControlPort instance and display metrics of the
receiver. The ATSC publishes information about the success of the
Reed-Solomon decoder and Viterbi metrics for use here in displaying
the link quality. This also gets the equalizer taps of the receiver
and displays the frequency response.
"""
class atsc_ctrlport_monitor:
def __init__(self, host, port):
argv = [None, host, port]
radiosys = GNURadioControlPortClient(argv=argv, rpcmethod='thrift')
self.radio = radiosys.client
print self.radio
vt_init_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
data = self.radio.getKnobs([vt_init_key])[vt_init_key]
init_metric = scipy.mean(data.value)
self._viterbi_metric = 100*[init_metric,]
table_col_labels = ('Num Packets', 'Error Rate', 'Packet Error Rate',
'Viterbi Metric', 'SNR')
self._fig = plt.figure(1, figsize=(12,12), facecolor='w')
self._sp0 = self._fig.add_subplot(4,1,1)
self._sp1 = self._fig.add_subplot(4,1,2)
self._sp2 = self._fig.add_subplot(4,1,3)
self._plot_taps = self._sp0.plot([], [], 'k', linewidth=2)
self._plot_psd = self._sp1.plot([], [], 'k', linewidth=2)
self._plot_data = self._sp2.plot([], [], 'ok', linewidth=2, markersize=4, alpha=0.05)
self._ax2 = self._fig.add_subplot(4,1,4)
self._table = self._ax2.table(cellText=[len(table_col_labels)*['0']],
colLabels=table_col_labels,
loc='center')
self._ax2.axis('off')
cells = self._table.properties()['child_artists']
for c in cells:
            c.set_lw(0.1)  # sets line width
c.set_ls('solid')
c.set_height(0.2)
ani = animation.FuncAnimation(self._fig, self.update_data, frames=200,
fargs=(self._plot_taps[0], self._plot_psd[0],
self._plot_data[0], self._table),
init_func=self.init_function,
blit=True)
plt.show()
def update_data(self, x, taps, psd, syms, table):
try:
eqdata_key = 'dtv_atsc_equalizer0::taps'
symdata_key = 'dtv_atsc_equalizer0::data'
rs_nump_key = 'dtv_atsc_rs_decoder0::num_packets'
rs_numbp_key = 'dtv_atsc_rs_decoder0::num_bad_packets'
rs_numerrs_key = 'dtv_atsc_rs_decoder0::num_errors_corrected'
vt_metrics_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
snr_key = 'probe2_f0::SNR'
data = self.radio.getKnobs([])
eqdata = data[eqdata_key]
symdata = data[symdata_key]
rs_num_packets = data[rs_nump_key]
rs_num_bad_packets = data[rs_numbp_key]
rs_num_errors_corrected = data[rs_numerrs_key]
vt_decoder_metrics = data[vt_metrics_key]
snr_est = data[snr_key]
vt_decoder_metrics = scipy.mean(vt_decoder_metrics.value)
self._viterbi_metric.pop()
self._viterbi_metric.insert(0, vt_decoder_metrics)
except:
sys.stderr.write("Lost connection, exiting")
sys.exit(1)
ntaps = len(eqdata.value)
taps.set_ydata(eqdata.value)
taps.set_xdata(xrange(ntaps))
self._sp0.set_xlim(0, ntaps)
self._sp0.set_ylim(min(eqdata.value), max(eqdata.value))
fs = 6.25e6
freq = scipy.linspace(-fs/2, fs/2, 10000)
H = fftpack.fftshift(fftpack.fft(eqdata.value, 10000))
HdB = 20.0*scipy.log10(abs(H))
psd.set_ydata(HdB)
psd.set_xdata(freq)
self._sp1.set_xlim(0, fs/2)
self._sp1.set_ylim([min(HdB), max(HdB)])
self._sp1.set_yticks([min(HdB), max(HdB)])
self._sp1.set_yticklabels(["min", "max"])
nsyms = len(symdata.value)
syms.set_ydata(symdata.value)
syms.set_xdata(nsyms*[0,])
self._sp2.set_xlim([-1, 1])
self._sp2.set_ylim([-10, 10])
per = float(rs_num_bad_packets.value) / float(rs_num_packets.value)
ber = float(rs_num_errors_corrected.value) / float(187*rs_num_packets.value)
table._cells[(1,0)]._text.set_text("{0}".format(rs_num_packets.value))
table._cells[(1,1)]._text.set_text("{0:.2g}".format(ber))
table._cells[(1,2)]._text.set_text("{0:.2g}".format(per))
table._cells[(1,3)]._text.set_text("{0:.1f}".format(scipy.mean(self._viterbi_metric)))
table._cells[(1,4)]._text.set_text("{0:.4f}".format(snr_est.value[0]))
return (taps, psd, syms, table)
def init_function(self):
return self._plot_taps + self._plot_psd + self._plot_data
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
m = atsc_ctrlport_monitor(host, port)
|
gpl-3.0
|
chichilalescu/bfps
|
tests/test_field_class.py
|
1
|
6948
|
import numpy as np
import h5py
import matplotlib.pyplot as plt
import pyfftw
import bfps
import bfps.tools
import os
from bfps._fluid_base import _fluid_particle_base
class TestField(_fluid_particle_base):
def __init__(
self,
name = 'TestField-v' + bfps.__version__,
work_dir = './',
simname = 'test',
fluid_precision = 'single',
use_fftw_wisdom = False):
_fluid_particle_base.__init__(
self,
name = name + '-' + fluid_precision,
work_dir = work_dir,
simname = simname,
dtype = fluid_precision,
use_fftw_wisdom = use_fftw_wisdom)
self.fill_up_fluid_code()
self.finalize_code()
return None
def fill_up_fluid_code(self):
self.fluid_includes += '#include <cstring>\n'
self.fluid_includes += '#include "fftw_tools.hpp"\n'
self.fluid_includes += '#include "field.hpp"\n'
self.fluid_variables += ('field<' + self.C_dtype + ', FFTW, ONE> *f;\n' +
'field<' + self.C_dtype + ', FFTW, THREE> *v;\n' +
'kspace<FFTW, SMOOTH> *kk;\n')
self.fluid_start += """
//begincpp
f = new field<{0}, FFTW, ONE>(
nx, ny, nz, MPI_COMM_WORLD);
v = new field<{0}, FFTW, THREE>(
nx, ny, nz, MPI_COMM_WORLD);
kk = new kspace<FFTW, SMOOTH>(
f->clayout, 1., 1., 1.);
// read rdata
f->real_space_representation = true;
f->io("field.h5", "scal", 0, true);
// go to fourier space, write into cdata_tmp
f->dft();
f->io("field.h5", "scal_tmp", 0, false);
f->ift();
f->io("field.h5", "scal", 0, false);
f->real_space_representation = false;
f->io("field.h5", "scal", 0, true);
hid_t gg;
if (f->myrank == 0)
gg = H5Fopen("field.h5", H5F_ACC_RDWR, H5P_DEFAULT);
kk->cospectrum<float, ONE>(
f->get_cdata(),
f->get_cdata(),
gg,
"scal",
0);
f->ift();
f->io("field.h5", "scal_tmp", 0, false);
std::vector<double> me;
me.resize(1);
me[0] = 30;
f->compute_rspace_stats(
gg, "scal",
0, me);
if (f->myrank == 0)
H5Fclose(gg);
v->real_space_representation = false;
v->io("field.h5", "vec", 0, true);
v->io("field.h5", "vec_tmp", 0, false);
//endcpp
""".format(self.C_dtype)
self.fluid_end += """
//begincpp
delete f;
delete v;
//endcpp
"""
return None
def specific_parser_arguments(
self,
parser):
_fluid_particle_base.specific_parser_arguments(self, parser)
return None
def launch(
self,
args = [],
**kwargs):
opt = self.prepare_launch(args)
self.parameters['niter_todo'] = 0
self.pars_from_namespace(opt)
self.set_host_info(bfps.host_info)
self.write_par()
self.run(ncpu = opt.ncpu)
return None
def main():
n = 32
kdata = pyfftw.n_byte_align_empty(
(n, n, n//2 + 1),
pyfftw.simd_alignment,
dtype = np.complex64)
rdata = pyfftw.n_byte_align_empty(
(n, n, n),
pyfftw.simd_alignment,
dtype = np.float32)
c2r = pyfftw.FFTW(
kdata.transpose((1, 0, 2)),
rdata,
axes = (0, 1, 2),
direction = 'FFTW_BACKWARD',
threads = 2)
kdata[:] = bfps.tools.generate_data_3D(n, n, n, dtype = np.complex64)
cdata = kdata.copy()
c2r.execute()
tf = TestField()
tf.parameters['nx'] = n
tf.parameters['ny'] = n
tf.parameters['nz'] = n
f = h5py.File('field.h5', 'w')
f['scal/complex/0'] = cdata
f['scal/real/0'] = rdata
f['vec/complex/0'] = np.array([cdata, cdata, cdata]).reshape(cdata.shape + (3,))
f['vec/real/0'] = np.array([rdata, rdata, rdata]).reshape(rdata.shape + (3,))
f['moments/scal'] = np.zeros(shape = (1, 10)).astype(np.float)
f['histograms/scal'] = np.zeros(shape = (1, 64)).astype(np.float)
kspace = tf.get_kspace()
nshells = kspace['nshell'].shape[0]
f['spectra/scal'] = np.zeros(shape = (1, nshells)).astype(np.float64)
f.close()
## run cpp code
tf.launch(
['-n', '{0}'.format(n),
'--ncpu', '2'])
f = h5py.File('field.h5', 'r')
#err0 = np.max(np.abs(f['scal_tmp/real/0'].value - rdata)) / np.mean(np.abs(rdata))
#err1 = np.max(np.abs(f['scal/real/0'].value/(n**3) - rdata)) / np.mean(np.abs(rdata))
#err2 = np.max(np.abs(f['scal_tmp/complex/0'].value/(n**3) - cdata)) / np.mean(np.abs(cdata))
#print(err0, err1, err2)
#assert(err0 < 1e-5)
#assert(err1 < 1e-5)
#assert(err2 < 1e-4)
## compare
fig = plt.figure(figsize=(18, 6))
a = fig.add_subplot(131)
a.set_axis_off()
v0 = f['vec/complex/0'][:, :, 0, 0]
v1 = f['vec_tmp/complex/0'][:, :, 0, 0]
a.imshow(np.log(np.abs(v0 - v1)),
interpolation = 'none')
a = fig.add_subplot(132)
a.set_axis_off()
a.imshow(np.log(np.abs(v0)),
interpolation = 'none')
a = fig.add_subplot(133)
a.set_axis_off()
a.imshow(np.log(np.abs(v1)),
interpolation = 'none')
fig.tight_layout()
fig.savefig('tst_fields.pdf')
fig = plt.figure(figsize=(18, 6))
a = fig.add_subplot(131)
a.set_axis_off()
v0 = f['scal/complex/0'][:, :, 0]
v1 = f['scal_tmp/complex/0'][:, :, 0]
a.imshow(np.log(np.abs(v0 - v1)),
interpolation = 'none')
a = fig.add_subplot(132)
a.set_axis_off()
a.imshow(np.log(np.abs(v0)),
interpolation = 'none')
a = fig.add_subplot(133)
a.set_axis_off()
a.imshow(np.log(np.abs(v1)),
interpolation = 'none')
fig.tight_layout()
fig.savefig('tst_sfields.pdf')
# look at moments and histogram
#print('moments are ', f['moments/scal'][0])
#fig = plt.figure(figsize=(6,6))
#a = fig.add_subplot(211)
#a.plot(f['histograms/scal'][0])
#a.set_yscale('log')
#a = fig.add_subplot(212)
#a.plot(f['spectra/scal'][0])
#a.set_xscale('log')
#a.set_yscale('log')
#fig.tight_layout()
#fig.savefig('tst.pdf')
return None
if __name__ == '__main__':
main()
|
gpl-3.0
|