Dataset columns:
repo_name: string, length 6 to 112
path: string, length 4 to 204
copies: string, length 1 to 3
size: string, length 4 to 6
content: string, length 714 to 810k
license: string, 15 distinct values
repo_name: datapythonista/pandas
path: pandas/tests/arrays/boolean/test_astype.py
copies: 8
size: 1603
content:
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm


def test_astype():
    # with missing values
    arr = pd.array([True, False, None], dtype="boolean")

    with pytest.raises(ValueError, match="cannot convert NA to integer"):
        arr.astype("int64")

    with pytest.raises(ValueError, match="cannot convert float NaN to"):
        arr.astype("bool")

    result = arr.astype("float64")
    expected = np.array([1, 0, np.nan], dtype="float64")
    tm.assert_numpy_array_equal(result, expected)

    result = arr.astype("str")
    expected = np.array(["True", "False", "<NA>"], dtype="<U5")
    tm.assert_numpy_array_equal(result, expected)

    # no missing values
    arr = pd.array([True, False, True], dtype="boolean")
    result = arr.astype("int64")
    expected = np.array([1, 0, 1], dtype="int64")
    tm.assert_numpy_array_equal(result, expected)

    result = arr.astype("bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)


def test_astype_to_boolean_array():
    # astype to BooleanArray
    arr = pd.array([True, False, None], dtype="boolean")

    result = arr.astype("boolean")
    tm.assert_extension_array_equal(result, arr)
    result = arr.astype(pd.BooleanDtype())
    tm.assert_extension_array_equal(result, arr)


def test_astype_to_integer_array():
    # astype to IntegerArray
    arr = pd.array([True, False, None], dtype="boolean")

    result = arr.astype("Int64")
    expected = pd.array([1, 0, None], dtype="Int64")
    tm.assert_extension_array_equal(result, expected)
license: bsd-3-clause
repo_name: Fireblend/scikit-learn
path: examples/applications/face_recognition.py
copies: 191
size: 5513
content:
""" =================================================== Faces recognition example using eigenfaces and SVMs =================================================== The dataset used in this example is a preprocessed excerpt of the "Labeled Faces in the Wild", aka LFW_: http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB) .. _LFW: http://vis-www.cs.umass.edu/lfw/ Expected results for the top 5 most represented people in the dataset:: precision recall f1-score support Ariel Sharon 0.67 0.92 0.77 13 Colin Powell 0.75 0.78 0.76 60 Donald Rumsfeld 0.78 0.67 0.72 27 George W Bush 0.86 0.86 0.86 146 Gerhard Schroeder 0.76 0.76 0.76 25 Hugo Chavez 0.67 0.67 0.67 15 Tony Blair 0.81 0.69 0.75 36 avg / total 0.80 0.80 0.80 322 """ from __future__ import print_function from time import time import logging import matplotlib.pyplot as plt from sklearn.cross_validation import train_test_split from sklearn.datasets import fetch_lfw_people from sklearn.grid_search import GridSearchCV from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.decomposition import RandomizedPCA from sklearn.svm import SVC print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') ############################################################################### # Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the shapes (for plotting) n_samples, h, w = lfw_people.images.shape # for machine learning we use the 2 data directly (as relative pixel # positions info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print("n_classes: %d" % n_classes) ############################################################################### # Split into a training set and a test set using a stratified k fold # split into a training and testing set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42) ############################################################################### # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled # dataset): unsupervised feature extraction / dimensionality reduction n_components = 150 print("Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])) t0 = time() pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train) print("done in %0.3fs" % (time() - t0)) eigenfaces = pca.components_.reshape((n_components, h, w)) print("Projecting the input data on the eigenfaces orthonormal basis") t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print("done in %0.3fs" % (time() - t0)) ############################################################################### # Train a SVM classification model print("Fitting the classifier to the training set") t0 = time() param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], } clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid) clf = clf.fit(X_train_pca, y_train) print("done in %0.3fs" % (time() - t0)) print("Best estimator found by grid search:") print(clf.best_estimator_) 
############################################################################### # Quantitative evaluation of the model quality on the test set print("Predicting people's names on the test set") t0 = time() y_pred = clf.predict(X_test_pca) print("done in %0.3fs" % (time() - t0)) print(classification_report(y_test, y_pred, target_names=target_names)) print(confusion_matrix(y_test, y_pred, labels=range(n_classes))) ############################################################################### # Qualitative evaluation of the predictions using matplotlib def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return 'predicted: %s\ntrue: %s' % (pred_name, true_name) prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_test, prediction_titles, h, w) # plot the gallery of the most significative eigenfaces eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) plt.show()
license: bsd-3-clause
repo_name: MSeifert04/astropy
path: astropy/timeseries/sampled.py
copies: 3
size: 16005
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst from copy import deepcopy import numpy as np from astropy.table import groups, QTable, Table from astropy.time import Time, TimeDelta from astropy import units as u from astropy.units import Quantity, UnitsError from astropy.utils.decorators import deprecated_renamed_argument from astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns __all__ = ['TimeSeries'] @autocheck_required_columns class TimeSeries(BaseTimeSeries): """ A class to represent time series data in tabular form. `~astropy.timeseries.TimeSeries` provides a class for representing time series as a collection of values of different quantities measured at specific points in time (for time series with finite time bins, see the `~astropy.timeseries.BinnedTimeSeries` class). `~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable` and thus provides all the standard table maniplation methods available to tables, but it also provides additional conveniences for dealing with time series, such as a flexible initializer for setting up the times, a method for folding time series, and a ``time`` attribute for easy access to the time values. See also: http://docs.astropy.org/en/stable/timeseries/ Parameters ---------- data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional Data to initialize time series. This does not need to contain the times, which can be provided separately, but if it does contain the times they should be in a column called ``'time'`` to be automatically recognized. time : `~astropy.time.Time` or iterable The times at which the values are sampled - this can be either given directly as a `~astropy.time.Time` array or as any iterable that initializes the `~astropy.time.Time` class. If this is given, then the remaining time-related arguments should not be used. time_start : `~astropy.time.Time` or str The time of the first sample in the time series. This is an alternative to providing ``time`` and requires that ``time_delta`` is also provided. time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` The step size in time for the series. This can either be a scalar if the time series is evenly sampled, or an array of values if it is not. n_samples : int The number of time samples for the series. This is only used if both ``time_start`` and ``time_delta`` are provided and are scalar values. **kwargs : dict, optional Additional keyword arguments are passed to `~astropy.table.QTable`. """ _required_columns = ['time'] def __init__(self, data=None, *, time=None, time_start=None, time_delta=None, n_samples=None, **kwargs): super().__init__(data=data, **kwargs) # For some operations, an empty time series needs to be created, then # columns added one by one. We should check that when columns are added # manually, time is added first and is of the right type. if data is None and time is None and time_start is None and time_delta is None: self._required_columns_relax = True return # First if time has been given in the table data, we should extract it # and treat it as if it had been passed as a keyword argument. 
if data is not None: if n_samples is not None: if n_samples != len(self): raise TypeError("'n_samples' has been given both and it is not the " "same length as the input data.") else: n_samples = len(self) if 'time' in self.colnames: if time is None: time = self.columns['time'] else: raise TypeError("'time' has been given both in the table and as a keyword argument") if time is None and time_start is None: raise TypeError("Either 'time' or 'time_start' should be specified") elif time is not None and time_start is not None: raise TypeError("Cannot specify both 'time' and 'time_start'") if time is not None and not isinstance(time, Time): time = Time(time) if time_start is not None and not isinstance(time_start, Time): time_start = Time(time_start) if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)): raise TypeError("'time_delta' should be a Quantity or a TimeDelta") if isinstance(time_delta, TimeDelta): time_delta = time_delta.sec * u.s if time_start is not None: # We interpret this as meaning that time is that of the first # sample and that the interval is given by time_delta. if time_delta is None: raise TypeError("'time' is scalar, so 'time_delta' is required") if time_delta.isscalar: time_delta = np.repeat(time_delta, n_samples) time_delta = np.cumsum(time_delta) time_delta = np.roll(time_delta, 1) time_delta[0] = 0. * u.s time = time_start + time_delta elif len(self.colnames) > 0 and len(time) != len(self): raise ValueError("Length of 'time' ({}) should match " "data length ({})".format(len(time), n_samples)) elif time_delta is not None: raise TypeError("'time_delta' should not be specified since " "'time' is an array") with self._delay_required_column_checks(): if 'time' in self.colnames: self.remove_column('time') self.add_column(time, index=0, name='time') @property def time(self): """ The time values. """ return self['time'] @deprecated_renamed_argument('midpoint_epoch', 'epoch_time', '4.0') def fold(self, period=None, epoch_time=None, epoch_phase=0, wrap_phase=None, normalize_phase=False): """ Return a new `~astropy.timeseries.TimeSeries` folded with a period and epoch. Parameters ---------- period : `~astropy.units.Quantity` The period to use for folding epoch_time : `~astropy.time.Time` The time to use as the reference epoch, at which the relative time offset / phase will be ``epoch_phase``. Defaults to the first time in the time series. epoch_phase : float or `~astropy.units.Quantity` Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this should be a dimensionless value, while if ``normalize_phase`` is ``False``, this should be a `~astropy.units.Quantity` with time units. Defaults to 0. wrap_phase : float or `~astropy.units.Quantity` The value of the phase above which values are wrapped back by one period. If ``normalize_phase`` is `True`, this should be a dimensionless value, while if ``normalize_phase`` is ``False``, this should be a `~astropy.units.Quantity` with time units. Defaults to half the period, so that the resulting time series goes from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is `False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`). normalize_phase : bool If `False` phase is returned as `~astropy.time.TimeDelta`, otherwise as a dimensionless `~astropy.units.Quantity`. Returns ------- folded_timeseries : `~astropy.timeseries.TimeSeries` The folded time series object with phase as the ``time`` column. 
""" if not isinstance(period, Quantity) or period.unit.physical_type != 'time': raise UnitsError('period should be a Quantity in units of time') folded = self.copy() if epoch_time is None: epoch_time = self.time[0] else: epoch_time = Time(epoch_time) period_sec = period.to_value(u.s) if normalize_phase: if isinstance(epoch_phase, Quantity) and epoch_phase.unit.physical_type != 'dimensionless': raise UnitsError('epoch_phase should be a dimensionless Quantity ' 'or a float when normalize_phase=True') epoch_phase_sec = epoch_phase * period_sec else: if epoch_phase == 0: epoch_phase_sec = 0. else: if not isinstance(epoch_phase, Quantity) or epoch_phase.unit.physical_type != 'time': raise UnitsError('epoch_phase should be a Quantity in units ' 'of time when normalize_phase=False') epoch_phase_sec = epoch_phase.to_value(u.s) if wrap_phase is None: wrap_phase = period_sec / 2 else: if normalize_phase: if isinstance(wrap_phase, Quantity) and not wrap_phase.unit.is_equivalent(u.one): raise UnitsError('wrap_phase should be dimensionless when ' 'normalize_phase=True') else: if wrap_phase < 0 or wrap_phase > 1: raise ValueError('wrap_phase should be between 0 and 1') else: wrap_phase = wrap_phase * period_sec else: if isinstance(wrap_phase, Quantity) and wrap_phase.unit.physical_type == 'time': if wrap_phase < 0 or wrap_phase > period: raise ValueError('wrap_phase should be between 0 and the period') else: wrap_phase = wrap_phase.to_value(u.s) else: raise UnitsError('wrap_phase should be a Quantity in units ' 'of time when normalize_phase=False') relative_time_sec = (((self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)) % period_sec - (period_sec - wrap_phase)) folded_time = TimeDelta(relative_time_sec * u.s) if normalize_phase: folded_time = (folded_time / period).decompose() period = period_sec = 1 with folded._delay_required_column_checks(): folded.remove_column('time') folded.add_column(folded_time, name='time', index=0) return folded def __getitem__(self, item): if self._is_list_or_tuple_of_str(item): if 'time' not in item: out = QTable([self[x] for x in item], meta=deepcopy(self.meta), copy_indices=self._copy_indices) out._groups = groups.TableGroups(out, indices=self.groups._indices, keys=self.groups._keys) return out return super().__getitem__(item) def add_column(self, *args, **kwargs): """ See :meth:`~astropy.table.Table.add_column`. """ # Note that the docstring is inherited from QTable result = super().add_column(*args, **kwargs) if len(self.indices) == 0 and 'time' in self.colnames: self.add_index('time') return result def add_columns(self, *args, **kwargs): """ See :meth:`~astropy.table.Table.add_columns`. """ # Note that the docstring is inherited from QTable result = super().add_columns(*args, **kwargs) if len(self.indices) == 0 and 'time' in self.colnames: self.add_index('time') return result @classmethod def from_pandas(self, df, time_scale='utc'): """ Convert a :class:`~pandas.DataFrame` to a :class:`astropy.timeseries.TimeSeries`. Parameters ---------- df : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance. time_scale : str The time scale to pass into `astropy.time.Time`. Defaults to ``UTC``. 
""" from pandas import DataFrame, DatetimeIndex if not isinstance(df, DataFrame): raise TypeError("Input should be a pandas DataFrame") if not isinstance(df.index, DatetimeIndex): raise TypeError("DataFrame does not have a DatetimeIndex") time = Time(df.index, scale=time_scale) table = Table.from_pandas(df) return TimeSeries(time=time, data=table) def to_pandas(self): """ Convert this :class:`~astropy.timeseries.TimeSeries` to a :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index. Returns ------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance """ return Table(self).to_pandas(index='time') @classmethod def read(self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs): """ Read and parse a file and returns a `astropy.timeseries.TimeSeries`. This method uses the unified I/O infrastructure in Astropy which makes it easy to define readers/writers for various classes (http://docs.astropy.org/en/stable/io/unified.html). By default, this method will try and use readers defined specifically for the `astropy.timeseries.TimeSeries` class - however, it is also possible to use the ``format`` keyword to specify formats defined for the `astropy.table.Table` class - in this case, you will need to also provide the column names for column containing the start times for the bins, as well as other column names (see the Parameters section below for details):: >>> from astropy.timeseries import TimeSeries >>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv', ... time_column='date') # doctest: +SKIP Parameters ---------- filename : str File to parse. format : str File format specifier. time_column : str, optional The name of the time column. time_format : str, optional The time format for the time column. time_scale : str, optional The time scale for the time column. *args : tuple, optional Positional arguments passed through to the data reader. **kwargs : dict, optional Keyword arguments passed through to the data reader. Returns ------- out : `astropy.timeseries.sampled.TimeSeries` TimeSeries corresponding to file contents. Notes ----- """ try: # First we try the readers defined for the BinnedTimeSeries class return super().read(filename, format=format, *args, **kwargs) except TypeError: # Otherwise we fall back to the default Table readers if time_column is None: raise ValueError("``time_column`` should be provided since the default Table readers are being used.") table = Table.read(filename, format=format, *args, **kwargs) if time_column in table.colnames: time = Time(table.columns[time_column], scale=time_scale, format=time_format) table.remove_column(time_column) else: raise ValueError(f"Time column '{time_column}' not found in the input data.") return TimeSeries(time=time, data=table)
license: bsd-3-clause
repo_name: florentchandelier/zipline
path: tests/utils/test_pandas_utils.py
copies: 2
size: 3367
content:
""" Tests for zipline/utils/pandas_utils.py """ import pandas as pd from zipline.testing import parameter_space, ZiplineTestCase from zipline.utils.pandas_utils import nearest_unequal_elements class TestNearestUnequalElements(ZiplineTestCase): @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True) def test_nearest_unequal_elements(self, tz): dts = pd.to_datetime( ['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'], ).tz_localize(tz) def t(s): return None if s is None else pd.Timestamp(s, tz=tz) for dt, before, after in (('2013-12-30', None, '2014-01-01'), ('2013-12-31', None, '2014-01-01'), ('2014-01-01', None, '2014-01-05'), ('2014-01-02', '2014-01-01', '2014-01-05'), ('2014-01-03', '2014-01-01', '2014-01-05'), ('2014-01-04', '2014-01-01', '2014-01-05'), ('2014-01-05', '2014-01-01', '2014-01-06'), ('2014-01-06', '2014-01-05', '2014-01-09'), ('2014-01-07', '2014-01-06', '2014-01-09'), ('2014-01-08', '2014-01-06', '2014-01-09'), ('2014-01-09', '2014-01-06', None), ('2014-01-10', '2014-01-09', None), ('2014-01-11', '2014-01-09', None)): computed = nearest_unequal_elements(dts, t(dt)) expected = (t(before), t(after)) self.assertEqual(computed, expected) @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True) def test_nearest_unequal_elements_short_dts(self, tz): # Length 1. dts = pd.to_datetime(['2014-01-01']).tz_localize(tz) def t(s): return None if s is None else pd.Timestamp(s, tz=tz) for dt, before, after in (('2013-12-31', None, '2014-01-01'), ('2014-01-01', None, None), ('2014-01-02', '2014-01-01', None)): computed = nearest_unequal_elements(dts, t(dt)) expected = (t(before), t(after)) self.assertEqual(computed, expected) # Length 0 dts = pd.to_datetime([]).tz_localize(tz) for dt, before, after in (('2013-12-31', None, None), ('2014-01-01', None, None), ('2014-01-02', None, None)): computed = nearest_unequal_elements(dts, t(dt)) expected = (t(before), t(after)) self.assertEqual(computed, expected) def test_nearest_unequal_bad_input(self): with self.assertRaises(ValueError) as e: nearest_unequal_elements( pd.to_datetime(['2014', '2014']), pd.Timestamp('2014'), ) self.assertEqual(str(e.exception), 'dts must be unique') with self.assertRaises(ValueError) as e: nearest_unequal_elements( pd.to_datetime(['2014', '2013']), pd.Timestamp('2014'), ) self.assertEqual( str(e.exception), 'dts must be sorted in increasing order', )
license: apache-2.0
repo_name: ornlneutronimaging/ResoFit
path: ResoFit/data/IPTS_13639/ipts_13639_Ag.py
copies: 1
size: 3859
content:
from ResoFit.calibration import Calibration
from ResoFit.fitresonance import FitResonance
import matplotlib.pyplot as plt
import numpy as np
from ResoFit._utilities import get_foil_density_gcm3
from ResoFit._utilities import Layer
import pprint

# Global parameters
energy_min = 4.09
energy_max = 1000
energy_step = 0.01
# Input sample name or names as str, case sensitive
layers = Layer()
layers.add_layer(layer='Ag', thickness_mm=0.025)
# layers.add_layer(layer='Co', thickness_mm=0.025)
# layers.add_layer(layer='Hf', thickness_mm=0.025)
# layers.add_layer(layer='W', thickness_mm=0.05)
# layers.add_layer(layer='In', thickness_mm=0.05)
# layers.add_layer(layer='Cd', thickness_mm=0.5)
# layers.add_layer(layer='Au', thickness_mm=0.01)

# simu = Simulation(energy_min=energy_min, energy_max=energy_max, energy_step=energy_step)
# simu.add_Layer(layer=layers)
# peak_dict = simu.peak_map(thres=0.015, min_dist=20)
# pprint.pprint(peak_dict)

folder = 'data/IPTS_13639/reso_data_13639'
data_file = 'Ag.csv'
spectra_file = 'spectra.csv'
image_start = 300  # Can be omitted or =None
image_end = 2700  # Can be omitted or =None
# norm_to_file = 'ob_1.csv'  # 'Ag.csv'
# norm_to_file = 'Ag.csv'
norm_to_file = 'ob_all.csv'
baseline = False
each_step = False

norm_factor = 1.2
source_to_detector_m = 16.126845685903064  # 16#16.445359069030175#16.447496101100739
offset_us = -12112.431834715671  # 0#2.7120797253959119#2.7355447625559037

# Calibrate the peak positions
calibration = Calibration(data_file=data_file,
                          spectra_file=spectra_file,
                          layer=layers,
                          energy_min=energy_min,
                          energy_max=energy_max,
                          energy_step=energy_step,
                          folder=folder,
                          baseline=baseline)

calibration.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
calibration.experiment.slice(start=image_start, end=image_end)
calibrate_result = calibration.calibrate(source_to_detector_m=source_to_detector_m,
                                         offset_us=offset_us,
                                         vary='all',
                                         each_step=each_step)
calibration.index_peak(thres_exp=0.05, min_dist_exp=2, min_dist_map=5, thres_map=0.05)
# calibration.analyze_peak()
calibration.plot(
    y_type='attenuation',
    # y_type='transmission',
    x_type='energy',
    # t_unit='ms',
    # before=True,
    # interp=True,
    # mixed=True,
    # peak_exp='all',
    # table=False,
    peak_exp='indexed',
    peak_height=False,
    index_level='iso',
    peak_id='all',
    logx=True,
)
plt.xlim(left=0, right=1000)
# plt.show()

df = calibration.export(y_type='attenuation',
                        # y_type='transmission',
                        x_type='energy',
                        # t_unit='ms',
                        # before=True,
                        # interp=True,
                        # mixed=True,
                        index_level='iso',
                        peak_id='all')

# # Fit the peak height
# fit = FitResonance(folder=folder,
#                    spectra_file=spectra_file,
#                    data_file=data_file,
#                    repeat=repeat,
#                    energy_min=energy_min,
#                    energy_max=energy_max,
#                    energy_step=energy_step,
#                    calibrated_offset_us=calibration.calibrated_offset_us,
#                    calibrated_source_to_detector_m=calibration.calibrated_source_to_detector_m,
#                    norm_to_file=norm_to_file,
#                    slice_start=image_start,
#                    slice_end=image_end,
#                    baseline=baseline)
# fit_result = fit.fit(layer, vary='thickness', each_step=each_step)
# fit.molar_conc()
# fit.plot()
#
license: bsd-3-clause
repo_name: 1412kid/computationalphysics_n2014301020035
path: chapter6/chapter6_6.16.py
copies: 2
size: 4793
content:
# -*- coding: utf-8 -*-
"""
Created on Fri May 27 16:02:59 2016

@author: AF
"""
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
from scipy.fftpack import rfft, irfft, fftfreq

class Guass_seidel:
    def __init__(self,L,dx,T,e):
        self.L = L
        self.dx = dx
        self.r = 0.25
        self.c = 300
        self.T = T
        self.dt = self.dx/(4*self.c)
        self.e = e #epsilon
        pass
    def initialization(self):
        self.string = []
        for i in range(int(self.T/self.dt)):
            self.string.append([])
            for j in range(int(self.L/self.dx)):
                self.string[i].append(0)
        '''
        x_0 = int((self.L/self.dx)/8)
        self.string[1][x_0] = 0.1
        k1 = 0.1/(x_0 - 1)
        k2 = 0.1/(int(self.L/self.dx) - 1 - x_0)
        for l in range(1,x_0):
            self.string[1][l] = k1*(l - 1)
        for m in range(x_0,int(self.L/self.dx) - 1):
            self.string[1][m] = 0.1 - k2*(m - x_0)
        self.string[1][0] = -self.string[1][2]
        self.string[1][-1] = -self.string[1][-3]
        #plt.plot(self.string[1])
        self.string[0] = self.string[1][:]
        '''
        for l in range(1,int(int(self.L/self.dx)) - 1):
            self.string[0][l] = np.sin(10*np.pi/(48.5)*(l - 1))
            self.string[1][l] = np.sin(10*np.pi/(48.5)*(l - 1))
        self.string[0][0] = - self.string[0][2]
        self.string[0][-1] = - self.string[0][-3]
        self.string[1][0] = - self.string[1][2]
        self.string[1][-1] = - self.string[1][-3]
        #plt.plot(self.string[1])
        #print self.string[1]
        '''
        for l in range(1,int(int(self.L/self.dx) - 1)):
            self.string[0][l] = np.e**(-0.1*(l - 30)**2)
        self.string[0][1] = 0
        self.string[0][-2] = 0
        self.string[0][0] = - self.string[0][2]
        self.string[0][-1] = - self.string[0][-3]
        self.string[1] = self.string[0][:]
        #plt.plot(self.string[1])
        '''
        return 0
    def calculation(self):
        print 1
        M = int(self.L/self.dx)
        for i in range(2,int(self.T/self.dt)):
            for j in range(2,M-2):
                self.string[i][j] = (2 - 2*self.r**2 - 6*self.e*self.r**2*M**2)*self.string[i - 1][j] - self.string[i - 2][j] + self.r**2*(1 + 4*self.e*M**2)*(self.string[i-1][j+1]+self.string[i-1][j-1]) - self.e*self.r**2*M**2*(self.string[i-1][j+2]+self.string[i-1][j-2])
                #self.string[i][j] = 2*(1-self.r**2)*self.string[i - 1][j] - self.string[i - 2][j] + self.r**2*(self.string[i - 1][j + 1] +self.string[i - 1][j - 1])
            self.string[i][0] = -self.string[i][2]
            self.string[i][-1] = -self.string[i][-3]
        #print self.string
        #plt.plot(self.string[-1])
        return self.string
    def fft(self):
        t_plot = np.arange(0,self.T,self.dt)
        print len(t_plot)
        amplitude_record = []
        for i in range(int(self.T/self.dt)):
            amplitude_record.append(self.string[i][20])
        print len(amplitude_record)
        '''
        plt.subplot(121)
        plt.title('String signal versus time')
        plt.ylabel('Signal (arbitrary units)')
        plt.xlabel('Time (s)')
        plt.plot(t_plot,amplitude_record)
        '''
        freq = fftfreq(len(amplitude_record), d=self.dt)
        freq = np.array(abs(freq))
        f_signal = rfft(amplitude_record)
        f_signal = np.array(f_signal**2)
        #plt.subplot(122)
        plt.title('Power spectra')
        plt.ylabel('Power (arbitrary units)')
        plt.xlabel('Frequency (Hz)')
        plt.xlim(2000,8000)
        plt.plot(freq,f_signal,label = 'epsilon = '+str(self.e))
        return 0

plt.figure(figsize = (8,8))
A = Guass_seidel(1,0.01,0.05,0)
A.initialization()
data_record = A.calculation()
A.fft()
B = Guass_seidel(1,0.01,0.05,1e-5)
B.initialization()
data_record = B.calculation()
B.fft()
C = Guass_seidel(1,0.01,0.05,2e-5)
C.initialization()
data_record = C.calculation()
C.fft()
plt.legend()
plt.savefig('chapter6_6.16_highsine_c.png',dpi = 144)
plt.show()

'''
# ---- animation ---------------------------------
fig = plt.figure(figsize = (8,6))
ax = plt.axes(xlim=(0, 1),ylim = (-1,1))
line, = ax.plot([], [], 'k')
def init():
    line.set_data([], [])
    return line,
def animate(i):
    x_plot = np.arange(0.01,0.99,0.01)
    y_plot = []
    for j in range(1, int(A.L/A.dx) - 1):
        y_plot.append(data_record[i][j])
    line.set_data(x_plot,y_plot)
    return line,
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=50, blit=True)
anim.save('chapter6_string_guass_e.gif', fps=20, writer='Feng_Chen')
plt.show()
'''
license: mit
repo_name: schevalier/Whetlab-Python-Client
path: examples/mnist_random_forest.py
copies: 1
size: 1503
content:
# This example demonstrates the usage of spearmint within the context
# of sklearn.
# Here we train a random forest classifier on the MNIST dataset.

import whetlab
import numpy as np

# Define parameters to optimize
parameters = {'n_estimators': {'type': 'integer', 'min': 2, 'max': 100, 'size': 1},
              'max_depth': {'type': 'integer', 'min': 1, 'max': 20, 'size': 1}}
outcome = {'name': 'Classification accuracy'}
name = 'Random Forest'
description = 'Training a random forest on the MNIST dataset using the sklearn library'
access_token = None  # PUT VALID ACCESS TOKEN HERE OR IN YOUR ~/.whetlab FILE
scientist = whetlab.Experiment(name=name, description=description,
                               access_token=access_token,
                               parameters=parameters,
                               outcome=outcome)

# Setup scikit-learn experiment
from sklearn.datasets import fetch_mldata
from sklearn.ensemble import RandomForestClassifier

# Download the mnist dataset to the current working directory
mnist = fetch_mldata('MNIST original', data_home='.')
order = np.random.permutation(60000)
train_set = [mnist.data[order[:50000], :], mnist.target[order[:50000]]]
valid_set = [mnist.data[order[50000:60000], :], mnist.target[order[50000:60000]]]

for i in range(20):
    # Get suggested new experiment
    job = scientist.suggest()

    # Perform experiment
    learner = RandomForestClassifier(**job)
    learner.fit(*train_set)
    accuracy = learner.score(*valid_set)

    # Inform scientist about the outcome
    scientist.update(job, accuracy)

scientist.report()
license: bsd-3-clause
repo_name: IndraVikas/scikit-learn
path: sklearn/tests/test_calibration.py
copies: 213
size: 12219
content:
# Authors: Alexandre Gramfort <[email protected]> # License: BSD 3 clause import numpy as np from scipy import sparse from sklearn.utils.testing import (assert_array_almost_equal, assert_equal, assert_greater, assert_almost_equal, assert_greater_equal, assert_array_equal, assert_raises, assert_warns_message) from sklearn.datasets import make_classification, make_blobs from sklearn.naive_bayes import MultinomialNB from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from sklearn.svm import LinearSVC from sklearn.linear_model import Ridge from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer from sklearn.metrics import brier_score_loss, log_loss from sklearn.calibration import CalibratedClassifierCV from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration from sklearn.calibration import calibration_curve def test_calibration(): """Test calibration objects with isotonic and sigmoid""" n_samples = 100 X, y = make_classification(n_samples=2 * n_samples, n_features=6, random_state=42) sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) X -= X.min() # MultinomialNB only allows positive X # split train and test X_train, y_train, sw_train = \ X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test, y_test = X[n_samples:], y[n_samples:] # Naive-Bayes clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train) prob_pos_clf = clf.predict_proba(X_test)[:, 1] pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1) assert_raises(ValueError, pc_clf.fit, X, y) # Naive Bayes with calibration for this_X_train, this_X_test in [(X_train, X_test), (sparse.csr_matrix(X_train), sparse.csr_matrix(X_test))]: for method in ['isotonic', 'sigmoid']: pc_clf = CalibratedClassifierCV(clf, method=method, cv=2) # Note that this fit overwrites the fit on the entire training # set pc_clf.fit(this_X_train, y_train, sample_weight=sw_train) prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1] # Check that brier score has improved after calibration assert_greater(brier_score_loss(y_test, prob_pos_clf), brier_score_loss(y_test, prob_pos_pc_clf)) # Check invariance against relabeling [0, 1] -> [1, 2] pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train) prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1] assert_array_almost_equal(prob_pos_pc_clf, prob_pos_pc_clf_relabeled) # Check invariance against relabeling [0, 1] -> [-1, 1] pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train) prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1] assert_array_almost_equal(prob_pos_pc_clf, prob_pos_pc_clf_relabeled) # Check invariance against relabeling [0, 1] -> [1, 0] pc_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train) prob_pos_pc_clf_relabeled = \ pc_clf.predict_proba(this_X_test)[:, 1] if method == "sigmoid": assert_array_almost_equal(prob_pos_pc_clf, 1 - prob_pos_pc_clf_relabeled) else: # Isotonic calibration is not invariant against relabeling # but should improve in both cases assert_greater(brier_score_loss(y_test, prob_pos_clf), brier_score_loss((y_test + 1) % 2, prob_pos_pc_clf_relabeled)) # check that calibration can also deal with regressors that have # a decision_function clf_base_regressor = CalibratedClassifierCV(Ridge()) clf_base_regressor.fit(X_train, y_train) clf_base_regressor.predict(X_test) # Check failure cases: # only "isotonic" and "sigmoid" should be accepted as methods clf_invalid_method = CalibratedClassifierCV(clf, method="foo") assert_raises(ValueError, 
clf_invalid_method.fit, X_train, y_train) # base-estimators should provide either decision_function or # predict_proba (most regressors, for instance, should fail) clf_base_regressor = \ CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid") assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train) def test_sample_weight_warning(): n_samples = 100 X, y = make_classification(n_samples=2 * n_samples, n_features=6, random_state=42) sample_weight = np.random.RandomState(seed=42).uniform(size=len(y)) X_train, y_train, sw_train = \ X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_test = X[n_samples:] for method in ['sigmoid', 'isotonic']: base_estimator = LinearSVC(random_state=42) calibrated_clf = CalibratedClassifierCV(base_estimator, method=method) # LinearSVC does not currently support sample weights but they # can still be used for the calibration step (with a warning) msg = "LinearSVC does not support sample_weight." assert_warns_message( UserWarning, msg, calibrated_clf.fit, X_train, y_train, sample_weight=sw_train) probs_with_sw = calibrated_clf.predict_proba(X_test) # As the weights are used for the calibration, they should still yield # a different predictions calibrated_clf.fit(X_train, y_train) probs_without_sw = calibrated_clf.predict_proba(X_test) diff = np.linalg.norm(probs_with_sw - probs_without_sw) assert_greater(diff, 0.1) def test_calibration_multiclass(): """Test calibration for multiclass """ # test multi-class setting with classifier that implements # only decision function clf = LinearSVC() X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42, centers=3, cluster_std=3.0) # Use categorical labels to check that CalibratedClassifierCV supports # them correctly target_names = np.array(['a', 'b', 'c']) y = target_names[y_idx] X_train, y_train = X[::2], y[::2] X_test, y_test = X[1::2], y[1::2] clf.fit(X_train, y_train) for method in ['isotonic', 'sigmoid']: cal_clf = CalibratedClassifierCV(clf, method=method, cv=2) cal_clf.fit(X_train, y_train) probas = cal_clf.predict_proba(X_test) assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test))) # Check that log-loss of calibrated classifier is smaller than # log-loss of naively turned OvR decision function to probabilities # via softmax def softmax(y_pred): e = np.exp(-y_pred) return e / e.sum(axis=1).reshape(-1, 1) uncalibrated_log_loss = \ log_loss(y_test, softmax(clf.decision_function(X_test))) calibrated_log_loss = log_loss(y_test, probas) assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss) # Test that calibration of a multiclass classifier decreases log-loss # for RandomForestClassifier X, y = make_blobs(n_samples=100, n_features=2, random_state=42, cluster_std=3.0) X_train, y_train = X[::2], y[::2] X_test, y_test = X[1::2], y[1::2] clf = RandomForestClassifier(n_estimators=10, random_state=42) clf.fit(X_train, y_train) clf_probs = clf.predict_proba(X_test) loss = log_loss(y_test, clf_probs) for method in ['isotonic', 'sigmoid']: cal_clf = CalibratedClassifierCV(clf, method=method, cv=3) cal_clf.fit(X_train, y_train) cal_clf_probs = cal_clf.predict_proba(X_test) cal_loss = log_loss(y_test, cal_clf_probs) assert_greater(loss, cal_loss) def test_calibration_prefit(): """Test calibration for prefitted classifiers""" n_samples = 50 X, y = make_classification(n_samples=3 * n_samples, n_features=6, random_state=42) sample_weight = np.random.RandomState(seed=42).uniform(size=y.size) X -= X.min() # MultinomialNB only allows positive X # split train and test 
X_train, y_train, sw_train = \ X[:n_samples], y[:n_samples], sample_weight[:n_samples] X_calib, y_calib, sw_calib = \ X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \ sample_weight[n_samples:2 * n_samples] X_test, y_test = X[2 * n_samples:], y[2 * n_samples:] # Naive-Bayes clf = MultinomialNB() clf.fit(X_train, y_train, sw_train) prob_pos_clf = clf.predict_proba(X_test)[:, 1] # Naive Bayes with calibration for this_X_calib, this_X_test in [(X_calib, X_test), (sparse.csr_matrix(X_calib), sparse.csr_matrix(X_test))]: for method in ['isotonic', 'sigmoid']: pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit") for sw in [sw_calib, None]: pc_clf.fit(this_X_calib, y_calib, sample_weight=sw) y_prob = pc_clf.predict_proba(this_X_test) y_pred = pc_clf.predict(this_X_test) prob_pos_pc_clf = y_prob[:, 1] assert_array_equal(y_pred, np.array([0, 1])[np.argmax(y_prob, axis=1)]) assert_greater(brier_score_loss(y_test, prob_pos_clf), brier_score_loss(y_test, prob_pos_pc_clf)) def test_sigmoid_calibration(): """Test calibration values with Platt sigmoid model""" exF = np.array([5, -4, 1.0]) exY = np.array([1, -1, -1]) # computed from my python port of the C++ code in LibSVM AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512]) assert_array_almost_equal(AB_lin_libsvm, _sigmoid_calibration(exF, exY), 3) lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1])) sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF) assert_array_almost_equal(lin_prob, sk_prob, 6) # check that _SigmoidCalibration().fit only accepts 1d array or 2d column # arrays assert_raises(ValueError, _SigmoidCalibration().fit, np.vstack((exF, exF)), exY) def test_calibration_curve(): """Check calibration_curve function""" y_true = np.array([0, 0, 0, 1, 1, 1]) y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.]) prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2) prob_true_unnormalized, prob_pred_unnormalized = \ calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True) assert_equal(len(prob_true), len(prob_pred)) assert_equal(len(prob_true), 2) assert_almost_equal(prob_true, [0, 1]) assert_almost_equal(prob_pred, [0.1, 0.9]) assert_almost_equal(prob_true, prob_true_unnormalized) assert_almost_equal(prob_pred, prob_pred_unnormalized) # probabilities outside [0, 1] should not be accepted when normalize # is set to False assert_raises(ValueError, calibration_curve, [1.1], [-0.1], normalize=False) def test_calibration_nan_imputer(): """Test that calibration can accept nan""" X, y = make_classification(n_samples=10, n_features=2, n_informative=2, n_redundant=0, random_state=42) X[0, 0] = np.nan clf = Pipeline( [('imputer', Imputer()), ('rf', RandomForestClassifier(n_estimators=1))]) clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic') clf_c.fit(X, y) clf_c.predict(X)
license: bsd-3-clause
repo_name: jseabold/statsmodels
path: statsmodels/multivariate/manova.py
copies: 5
size: 4321
content:
# -*- coding: utf-8 -*-

"""Multivariate analysis of variance

author: Yichuan Liu
"""
import numpy as np

from statsmodels.compat.pandas import Substitution
from statsmodels.base.model import Model
from .multivariate_ols import MultivariateTestResults
from .multivariate_ols import _multivariate_ols_fit
from .multivariate_ols import _multivariate_ols_test, _hypotheses_doc

__docformat__ = 'restructuredtext en'


class MANOVA(Model):
    """
    Multivariate Analysis of Variance

    The implementation of MANOVA is based on multivariate regression and does
    not assume that the explanatory variables are categorical. Any type of
    variables as in regression is allowed.

    Parameters
    ----------
    endog : array_like
        Dependent variables. A nobs x k_endog array where nobs is
        the number of observations and k_endog is the number of dependent
        variables.
    exog : array_like
        Independent variables. A nobs x k_exog array where nobs is the
        number of observations and k_exog is the number of independent
        variables. An intercept is not included by default and should be added
        by the user. Models specified using a formula include an intercept by
        default.

    Attributes
    ----------
    endog : ndarray
        See Parameters.
    exog : ndarray
        See Parameters.

    Notes
    -----
    MANOVA is used though the `mv_test` function, and `fit` is not used.

    The ``from_formula`` interface is the recommended method to specify
    a model and simplifies testing without needing to manually configure
    the contrast matrices.

    References
    ----------
    .. [*] ftp://public.dhe.ibm.com/software/analytics/spss/documentation/
       statistics/20.0/en/client/Manuals/IBM_SPSS_Statistics_Algorithms.pdf
    """
    _formula_max_endog = None

    def __init__(self, endog, exog, missing='none', hasconst=None, **kwargs):
        if len(endog.shape) == 1 or endog.shape[1] == 1:
            raise ValueError('There must be more than one dependent variable'
                             ' to fit MANOVA!')
        super(MANOVA, self).__init__(endog, exog, missing=missing,
                                     hasconst=hasconst, **kwargs)
        self._fittedmod = _multivariate_ols_fit(self.endog, self.exog)

    def fit(self):
        raise NotImplementedError('fit is not needed to use MANOVA. Call'
                                  'mv_test directly on a MANOVA instance.')

    @Substitution(hypotheses_doc=_hypotheses_doc)
    def mv_test(self, hypotheses=None):
        """
        Linear hypotheses testing

        Parameters
        ----------
        %(hypotheses_doc)s

        Returns
        -------
        results: MultivariateTestResults

        Notes
        -----
        Testing the linear hypotheses

            L * params * M = 0

        where `params` is the regression coefficient matrix for the
        linear model y = x * params

        If the model is not specified using the formula interfact, then the
        hypotheses test each included exogenous variable, one at a time. In
        most applications with categorical variables, the ``from_formula``
        interface should be preferred when specifying a model since it
        provides knowledge about the model when specifying the hypotheses.
        """
        if hypotheses is None:
            if (hasattr(self, 'data') and self.data is not None and
                    hasattr(self.data, 'design_info')):
                terms = self.data.design_info.term_name_slices
                hypotheses = []
                for key in terms:
                    L_contrast = np.eye(self.exog.shape[1])[terms[key], :]
                    hypotheses.append([key, L_contrast, None])
            else:
                hypotheses = []
                for i in range(self.exog.shape[1]):
                    name = 'x%d' % (i)
                    L = np.zeros([1, self.exog.shape[1]])
                    L[0, i] = 1
                    hypotheses.append([name, L, None])

        results = _multivariate_ols_test(hypotheses, self._fittedmod,
                                         self.exog_names, self.endog_names)

        return MultivariateTestResults(results, self.endog_names,
                                       self.exog_names)
license: bsd-3-clause
repo_name: pdamodaran/yellowbrick
path: yellowbrick/utils/types.py
copies: 1
size: 6178
content:
# yellowbrick.utils.types
# Detection utilities for Scikit-Learn and Numpy types for flexibility
#
# Author:   Benjamin Bengfort <[email protected]>
# Created:  Fri May 19 10:51:13 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: types.py [79cd8cf] [email protected] $

"""
Detection utilities for Scikit-Learn and Numpy types for flexibility
"""

##########################################################################
## Imports
##########################################################################

import inspect
import numpy as np

from sklearn.base import BaseEstimator


##########################################################################
## Model Type checking utilities
##########################################################################

def is_estimator(model):
    """
    Determines if a model is an estimator using issubclass and isinstance.

    Parameters
    ----------
    estimator : class or instance
        The object to test if it is a Scikit-Learn clusterer, especially a
        Scikit-Learn estimator or Yellowbrick visualizer
    """
    if inspect.isclass(model):
        return issubclass(model, BaseEstimator)

    return isinstance(model, BaseEstimator)

# Alias for closer name to isinstance and issubclass
isestimator = is_estimator


def is_classifier(estimator):
    """
    Returns True if the given estimator is (probably) a classifier.

    Parameters
    ----------
    estimator : class or instance
        The object to test if it is a Scikit-Learn clusterer, especially a
        Scikit-Learn estimator or Yellowbrick visualizer

    See also
    --------
    is_classifier
        `sklearn.is_classifier() <https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py#L518>`_
    """
    # Test the _estimator_type property
    return getattr(estimator, "_estimator_type", None) == "classifier"

# Alias for closer name to isinstance and issubclass
isclassifier = is_classifier


def is_regressor(estimator):
    """
    Returns True if the given estimator is (probably) a regressor.

    Parameters
    ----------
    estimator : class or instance
        The object to test if it is a Scikit-Learn clusterer, especially a
        Scikit-Learn estimator or Yellowbrick visualizer

    See also
    --------
    is_regressor
        `sklearn.is_regressor() <https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py#L531>`_
    """
    # Test the _estimator_type property
    return getattr(estimator, "_estimator_type", None) == "regressor"

# Alias for closer name to isinstance and issubclass
isregressor = is_regressor


def is_clusterer(estimator):
    """
    Returns True if the given estimator is a clusterer.

    Parameters
    ----------
    estimator : class or instance
        The object to test if it is a Scikit-Learn clusterer, especially a
        Scikit-Learn estimator or Yellowbrick visualizer
    """
    # Test the _estimator_type property
    return getattr(estimator, "_estimator_type", None) == "clusterer"

# Alias for closer name to isinstance and issubclass
isclusterer = is_clusterer


def is_gridsearch(estimator):
    """
    Returns True if the given estimator is a clusterer.

    Parameters
    ----------
    estimator : class or instance
        The object to test if it is a Scikit-Learn clusterer, especially a
        Scikit-Learn estimator or Yellowbrick visualizer
    """
    from sklearn.model_selection import GridSearchCV, RandomizedSearchCV

    if inspect.isclass(estimator):
        return issubclass(estimator, (GridSearchCV, RandomizedSearchCV))

    return isinstance(estimator, (GridSearchCV, RandomizedSearchCV))

# Alias for closer name to isinstance and issubclass
isgridsearch = is_gridsearch


def is_probabilistic(estimator):
    """
    Returns True if the given estimator returns a y_score for it's decision
    function, e.g. has ``predict_proba`` or ``decision_function`` methods.

    Parameters
    ----------
    estimator : class or instance
        The object to test if is probabilistic, especially a Scikit-Learn
        estimator or Yellowbrick visualizer.
    """
    return any([
        hasattr(estimator, 'predict_proba'),
        hasattr(estimator, 'decision_function'),
    ])

# Alias for closer name to isinstance and issubclass
isprobabilistic = is_probabilistic


##########################################################################
## Data Type checking utilities
##########################################################################

def is_dataframe(obj):
    """
    Returns True if the given object is a Pandas Data Frame.

    Parameters
    ----------
    obj: instance
        The object to test whether or not is a Pandas DataFrame.
    """
    try:
        # This is the best method of type checking
        from pandas import DataFrame
        return isinstance(obj, DataFrame)
    except ImportError:
        # Pandas is not a dependency, so this is scary
        return obj.__class__.__name__ == "DataFrame"

# Alias for closer name to isinstance and issubclass
isdataframe = is_dataframe


def is_series(obj):
    """
    Returns True if the given object is a Pandas Series.

    Parameters
    ----------
    obj: instance
        The object to test whether or not is a Pandas Series.
    """
    try:
        # This is the best method of type checking
        from pandas import Series
        return isinstance(obj, Series)
    except ImportError:
        # Pandas is not a dependency, so this is scary
        return obj.__class__.__name__ == "Series"

# Alias for closer name to isinstance and issubclass
isseries = is_series


def is_structured_array(obj):
    """
    Returns True if the given object is a Numpy Structured Array.

    Parameters
    ----------
    obj: instance
        The object to test whether or not is a Numpy Structured Array.
    """
    if isinstance(obj, np.ndarray) and hasattr(obj, 'dtype'):
        if obj.dtype.names is not None:
            return True
    return False

# Alias for closer name to isinstance and issubclass
isstructuredarray = is_structured_array
license: apache-2.0
repo_name: billy-inn/scikit-learn
path: sklearn/tests/test_pipeline.py
copies: 162
size: 14875
content:
""" Test the pipeline module. """ import numpy as np from scipy import sparse from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.base import clone from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.cluster import KMeans from sklearn.feature_selection import SelectKBest, f_classif from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD from sklearn.datasets import load_iris from sklearn.preprocessing import StandardScaler from sklearn.feature_extraction.text import CountVectorizer JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) class IncorrectT(object): """Small class to test parameter dispatching. """ def __init__(self, a=None, b=None): self.a = a self.b = b class T(IncorrectT): def fit(self, X, y): return self def get_params(self, deep=False): return {'a': self.a, 'b': self.b} def set_params(self, **params): self.a = params['a'] return self class TransfT(T): def transform(self, X, y=None): return X class FitParamT(object): """Mock classifier """ def __init__(self): self.successful = False pass def fit(self, X, y, should_succeed=False): self.successful = should_succeed def predict(self, X): return self.successful def test_pipeline_init(): # Test the various init parameters of the pipeline. assert_raises(TypeError, Pipeline) # Check that we can't instantiate pipelines with objects without fit # method pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)]) # Smoke test with only an estimator clf = T() pipe = Pipeline([('svc', clf)]) assert_equal(pipe.get_params(deep=True), dict(svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False) )) # Check that params are set pipe.set_params(svc__a=0.1) assert_equal(clf.a, 0.1) assert_equal(clf.b, None) # Smoke test the repr: repr(pipe) # Test with two objects clf = SVC() filter1 = SelectKBest(f_classif) pipe = Pipeline([('anova', filter1), ('svc', clf)]) # Check that we can't use the same stage name twice assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())]) # Check that params are set pipe.set_params(svc__C=0.1) assert_equal(clf.C, 0.1) # Smoke test the repr: repr(pipe) # Check that params are not set when naming them wrong assert_raises(ValueError, pipe.set_params, anova__C=0.1) # Test clone pipe2 = clone(pipe) assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc']) # Check that apart from estimators, the parameters are the same params = pipe.get_params(deep=True) params2 = pipe2.get_params(deep=True) for x in pipe.get_params(deep=False): params.pop(x) for x in pipe2.get_params(deep=False): params2.pop(x) # Remove estimators that where copied params.pop('svc') params.pop('anova') params2.pop('svc') params2.pop('anova') assert_equal(params, params2) def test_pipeline_methods_anova(): # Test the various methods of the pipeline (anova). 
iris = load_iris() X = iris.data y = iris.target # Test with Anova + LogisticRegression clf = LogisticRegression() filter1 = SelectKBest(f_classif, k=2) pipe = Pipeline([('anova', filter1), ('logistic', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_fit_params(): # Test that the pipeline can take fit parameters pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())]) pipe.fit(X=None, y=None, clf__should_succeed=True) # classifier should return True assert_true(pipe.predict(None)) # and transformer params should not be changed assert_true(pipe.named_steps['transf'].a is None) assert_true(pipe.named_steps['transf'].b is None) def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([('cls', LinearRegression())]) # expected error message error_msg = ('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.') assert_raise_message(ValueError, error_msg % ('fake', 'Pipeline'), pipe.set_params, fake='nope') # nested model check assert_raise_message(ValueError, error_msg % ("fake", pipe), pipe.set_params, fake__estimator='nope') def test_pipeline_methods_pca_svm(): # Test the various methods of the pipeline (pca + svm). iris = load_iris() X = iris.data y = iris.target # Test with PCA + SVC clf = SVC(probability=True, random_state=0) pca = PCA(n_components='mle', whiten=True) pipe = Pipeline([('pca', pca), ('svc', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_methods_preprocessing_svm(): # Test the various methods of the pipeline (preprocessing + svm). iris = load_iris() X = iris.data y = iris.target n_samples = X.shape[0] n_classes = len(np.unique(y)) scaler = StandardScaler() pca = RandomizedPCA(n_components=2, whiten=True) clf = SVC(probability=True, random_state=0) for preprocessing in [scaler, pca]: pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)]) pipe.fit(X, y) # check shapes of various prediction functions predict = pipe.predict(X) assert_equal(predict.shape, (n_samples,)) proba = pipe.predict_proba(X) assert_equal(proba.shape, (n_samples, n_classes)) log_proba = pipe.predict_log_proba(X) assert_equal(log_proba.shape, (n_samples, n_classes)) decision_function = pipe.decision_function(X) assert_equal(decision_function.shape, (n_samples, n_classes)) pipe.score(X, y) def test_fit_predict_on_pipeline(): # test that the fit_predict method is implemented on a pipeline # test that the fit_predict on pipeline yields same results as applying # transform and clustering steps separately iris = load_iris() scaler = StandardScaler() km = KMeans(random_state=0) # first compute the transform and clustering step separately scaled = scaler.fit_transform(iris.data) separate_pred = km.fit_predict(scaled) # use a pipeline to do the transform and clustering in one step pipe = Pipeline([('scaler', scaler), ('Kmeans', km)]) pipeline_pred = pipe.fit_predict(iris.data) assert_array_almost_equal(pipeline_pred, separate_pred) def test_fit_predict_on_pipeline_without_fit_predict(): # tests that a pipeline does not have fit_predict method when final # step of pipeline does not have fit_predict defined scaler = StandardScaler() pca = PCA() pipe = Pipeline([('scaler', scaler), ('pca', pca)]) assert_raises_regex(AttributeError, "'PCA' object has no attribute 'fit_predict'", getattr, pipe, 'fit_predict') def 
test_feature_union(): # basic sanity check for feature union iris = load_iris() X = iris.data X -= X.mean(axis=0) y = iris.target svd = TruncatedSVD(n_components=2, random_state=0) select = SelectKBest(k=1) fs = FeatureUnion([("svd", svd), ("select", select)]) fs.fit(X, y) X_transformed = fs.transform(X) assert_equal(X_transformed.shape, (X.shape[0], 3)) # check if it does the expected thing assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) # test if it also works for sparse input # We use a different svd object to control the random_state stream fs = FeatureUnion([("svd", svd), ("select", select)]) X_sp = sparse.csr_matrix(X) X_sp_transformed = fs.fit_transform(X_sp, y) assert_array_almost_equal(X_transformed, X_sp_transformed.toarray()) # test setting parameters fs.set_params(select__k=2) assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4)) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)]) X_transformed = fs.fit_transform(X, y) assert_equal(X_transformed.shape, (X.shape[0], 8)) def test_make_union(): pca = PCA() mock = TransfT() fu = make_union(pca, mock) names, transformers = zip(*fu.transformer_list) assert_equal(names, ("pca", "transft")) assert_equal(transformers, (pca, mock)) def test_pipeline_transform(): # Test whether pipeline works with a transformer at the end. # Also test pipeline.transform and pipeline.inverse_transform iris = load_iris() X = iris.data pca = PCA(n_components=2) pipeline = Pipeline([('pca', pca)]) # test transform and fit_transform: X_trans = pipeline.fit(X).transform(X) X_trans2 = pipeline.fit_transform(X) X_trans3 = pca.fit_transform(X) assert_array_almost_equal(X_trans, X_trans2) assert_array_almost_equal(X_trans, X_trans3) X_back = pipeline.inverse_transform(X_trans) X_back2 = pca.inverse_transform(X_trans) assert_array_almost_equal(X_back, X_back2) def test_pipeline_fit_transform(): # Test whether pipeline works with a transformer missing fit_transform iris = load_iris() X = iris.data y = iris.target transft = TransfT() pipeline = Pipeline([('mock', transft)]) # test fit_transform: X_trans = pipeline.fit_transform(X, y) X_trans2 = transft.fit(X, y).transform(X) assert_array_almost_equal(X_trans, X_trans2) def test_make_pipeline(): t1 = TransfT() t2 = TransfT() pipe = make_pipeline(t1, t2) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transft-1") assert_equal(pipe.steps[1][0], "transft-2") pipe = make_pipeline(t1, t2, FitParamT()) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transft-1") assert_equal(pipe.steps[1][0], "transft-2") assert_equal(pipe.steps[2][0], "fitparamt") def test_feature_union_weights(): # test feature union with transformer weights iris = load_iris() X = iris.data y = iris.target pca = RandomizedPCA(n_components=2, random_state=0) select = SelectKBest(k=1) # test using fit followed by transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) fs.fit(X, y) X_transformed = fs.transform(X) # test using fit_transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) X_fit_transformed = fs.fit_transform(X, y) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)], transformer_weights={"mock": 10}) X_fit_transformed_wo_method = fs.fit_transform(X, y) # check 
against expected result # We use a different pca object to control the random_state stream assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_array_almost_equal(X_fit_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_fit_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7)) def test_feature_union_parallel(): # test that n_jobs work for FeatureUnion X = JUNK_FOOD_DOCS fs = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ]) fs_parallel = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs_parallel2 = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs.fit(X) X_transformed = fs.transform(X) assert_equal(X_transformed.shape[0], len(X)) fs_parallel.fit(X) X_transformed_parallel = fs_parallel.transform(X) assert_equal(X_transformed.shape, X_transformed_parallel.shape) assert_array_equal( X_transformed.toarray(), X_transformed_parallel.toarray() ) # fit_transform should behave the same X_transformed_parallel2 = fs_parallel2.fit_transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) # transformers should stay fit after fit_transform X_transformed_parallel2 = fs_parallel2.transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) def test_feature_union_feature_names(): word_vect = CountVectorizer(analyzer="word") char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3)) ft = FeatureUnion([("chars", char_vect), ("words", word_vect)]) ft.fit(JUNK_FOOD_DOCS) feature_names = ft.get_feature_names() for feat in feature_names: assert_true("chars__" in feat or "words__" in feat) assert_equal(len(feature_names), 35) def test_classes_property(): iris = load_iris() X = iris.data y = iris.target reg = make_pipeline(SelectKBest(k=1), LinearRegression()) reg.fit(X, y) assert_raises(AttributeError, getattr, reg, "classes_") clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0)) assert_raises(AttributeError, getattr, clf, "classes_") clf.fit(X, y) assert_array_equal(clf.classes_, np.unique(y))
bsd-3-clause
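The pipeline tests above repeatedly build small ANOVA + logistic-regression pipelines. The following short, self-contained sketch (illustrative parameter values only, not part of the test suite) shows the same Pipeline pattern outside the test harness, including the nested step__param naming the tests rely on.

# Minimal standalone sketch of the Pipeline pattern exercised by the tests above.
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, y = iris.data, iris.target

pipe = Pipeline([('anova', SelectKBest(f_classif, k=2)),
                 ('logistic', LogisticRegression())])
pipe.fit(X, y)
print(pipe.score(X, y))            # accuracy on the training data
print(pipe.get_params(deep=True))  # nested params, e.g. 'anova__k'
pipe.set_params(anova__k=3)        # set a nested parameter by its step__param name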
yannikbehr/spectroscopy
src/spectroscopy/visualize.py
1
6256
""" Overview plots for different elements in a dataset. """ import matplotlib import matplotlib.pyplot as plt from matplotlib.colors import Normalize, LogNorm from matplotlib.pyplot import cm import numpy as np import pandas as pd import tables from scipy.stats import binned_statistic import cartopy.crs as ccrs from cartopy.io.img_tiles import StamenTerrain import pyproj from spectroscopy.util import split_by_scan, vec2bearing class VizException(Exception): pass def plot_concentration(c, savefig=None, angle_bin=1.0, **kargs): """ Provide overview plots for data contained in a dataset. :type toplot: str :param toplot: Choose the datatype to plot. Parameters specific to `retrievals` contour plots: :type log: bool :param log: Turn on logarithmic colour scales. :type cmap_name: str :param cmap_name: The name of the matplotlib colour scale to use. :type angle_bins: :class:`numpy.ndarray` :param angle_bins: Define the bins onto which the angles of the retrievals are discretized to. :type ncontours: int :param ncontours: Number of contours used in the contour plot. """ matplotlib.style.use('classic') cmap_name = kargs.get('cmap_name', 'RdBu_r') log = kargs.get('log', False) angle_bins = kargs.get('angle_bins', np.arange(0, 180+angle_bin, angle_bin)) ncontours = kargs.get('ncontours', 100) ts = kargs.get('timeshift', 0.0) * 60. * 60. cmap = cm.get_cmap(cmap_name) # dicretize all retrievals onto a grid to show a daily plot for r in c.rawdata[:]: if r.type.name[0] == 'measurement': break m = [] times = [] ymin = angle_bins[-1] ymax = angle_bins[0] nretrieval = 0 for _angle, _so2, _t in split_by_scan(r.inc_angle[c.rawdata_indices[:]], c.value[:], r.datetime[c.rawdata_indices[:]]): ymin = min(_angle.min(), ymin) ymax = max(_angle.max(), ymax) times.append(_t) _so2_binned = binned_statistic( _angle, _so2, 'mean', angle_bins) m.append(_so2_binned.statistic) nretrieval += 1 m = np.array(m) fig = plt.figure() if log: z = np.where(m > 0.0, m, 0.1) plt.contourf(list(range(nretrieval)), angle_bins[1:], z.T, ncontours, norm=LogNorm(z.min(), z.max()), cmap=cmap) else: z = np.ma.masked_invalid(m) plt.contourf(list(range(nretrieval)), angle_bins[1:], m.T, ncontours, norm=Normalize(z.min(), z.max()), cmap=cmap) new_labels = [] new_ticks = [] for _xt in plt.xticks()[0]: try: dt = times[int(_xt)].astype('datetime64[us]').min() dt += np.timedelta64(int(ts), 's') new_labels.append((pd.to_datetime(str(dt)) .strftime("%Y-%m-%d %H:%M"))) new_ticks.append(_xt) except IndexError: continue plt.xticks(new_ticks, new_labels, rotation=30, horizontalalignment='right') cb = plt.colorbar() cb.set_label('Slant column amount SO2 [ppm m]') plt.ylim(ymin, ymax) plt.ylabel(r'Angle [$\circ$]') if savefig is not None: plt.savefig( savefig, bbox_inches='tight', dpi=300, format='png') return fig def plot_rawdata(r, savefig=None, **kargs): matplotlib.style.use('ggplot') try: dmin = kargs['datemin'] dmax = kargs['datemax'] except KeyError: idx = np.arange(r.d_var.shape[0]) else: try: dt = r.datetime[:].astype('datetime64[ms]') idx = np.where(((dt > np.datetime64(dmin)) & (dt < np.datetime64(dmax))))[0] except tables.NoSuchNodeError: idx = np.arange(r.d_var.shape[0]) counts = r.d_var[idx, :] w = r.ind_var[:] nc = counts.shape[0] cmap = cm.ScalarMappable(norm=Normalize(vmin=0, vmax=nc-1), cmap='RdBu') fig = plt.figure(figsize=(12, 6)) for i in range(nc): c = cmap.to_rgba(i) plt.plot(w, counts[i], color=c, alpha=0.2) plt.xlabel('Wavelength [nm]') plt.ylabel('Intensity') cax, kw = matplotlib.colorbar.make_axes(plt.gca()) norm = 
Normalize(vmin=0, vmax=nc, clip=False) c = matplotlib.colorbar.ColorbarBase(cax, cmap='RdBu', norm=norm) ticks = np.array([0, int(nc/2.), nc-1]) c.set_ticks(ticks) try: times = r.datetime[idx] labels = np.array([times[0], times[int(nc/2.)], times[nc-1]]) c.set_ticklabels(labels) except tables.NoSuchNodeError: pass if savefig is not None: plt.savefig( savefig, bbox_inches='tight', dpi=300, format='png') return fig def plot_gasflow(gf, vent=None, scale=100., **kargs): if vent is None: raise VizException("Please provide a vent location (lon, lat)") pos = gf.position[:] vx = gf.vx[:] vy = gf.vy[:] lon_min = vent[0] - 0.03 lon_max = vent[0] + 0.03 lat_min = vent[1] - 0.03 lat_max = vent[1] + 0.03 tiler = StamenTerrain() mercator = tiler.crs fig = plt.figure(figsize=(10, 10)) ax = plt.axes(projection=mercator) fig.add_axes(ax) ax.add_image(tiler, 11) p = ccrs.PlateCarree() g = pyproj.Geod(ellps='WGS84') for lon, lat, _vx, _vy in zip(pos[:, 0], pos[:, 1], vx, vy): wd = vec2bearing(_vx, _vy) ws = np.sqrt(_vx * _vx + _vy * _vy)*scale elon, elat, _ = g.fwd(lon, lat, wd, ws) x, y = p.transform_points(ccrs.Geodetic(), np.array([lon, elon]), np.array([lat, elat])) dx = y[0] - x[0] dy = y[1] - x[1] ax.quiver(np.array([x[0]]), np.array([x[1]]), np.array([dx]), np.array([dy]), transform=ccrs.PlateCarree()) ax.scatter(vent[0], vent[1], marker='^', color='red', s=50, transform=ccrs.Geodetic()) ax.set_extent([lon_min, lon_max, lat_min, lat_max]) return fig def plot(element, **kargs): name = str(element).replace('Buffer', '') name = 'plot_'+name.lower() globals()[name](element, **kargs)
gpl-3.0
linearregression/mpld3
mpld3/_display.py
15
16996
import warnings import random import json import jinja2 import numpy import re import os from ._server import serve from .utils import deprecated, get_id, write_ipynb_local_js from .mplexporter import Exporter from .mpld3renderer import MPLD3Renderer from . import urls __all__ = ["fig_to_html", "fig_to_dict", "fig_to_d3", "display_d3", "display", "show_d3", "show", "enable_notebook", "disable_notebook", "save_html", "save_json"] # Simple HTML template. This works in standalone web pages for single figures, # but will not work within the IPython notebook due to the presence of # requirejs SIMPLE_HTML = jinja2.Template(""" <script type="text/javascript" src="{{ d3_url }}"></script> <script type="text/javascript" src="{{ mpld3_url }}"></script> <style> {{ extra_css }} </style> <div id={{ figid }}></div> <script type="text/javascript"> !function(mpld3){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }(mpld3); </script> """) # RequireJS template. If requirejs and jquery are not defined, this will # result in an error. This is suitable for use within the IPython notebook. REQUIREJS_HTML = jinja2.Template(""" <style> {{ extra_css }} </style> <div id={{ figid }}></div> <script type="text/javascript"> if(typeof(window.mpld3) !== "undefined" && window.mpld3._mpld3IsLoaded){ !function (mpld3){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }(mpld3); }else{ require.config({paths: {d3: "{{ d3_url[:-3] }}"}}); require(["d3"], function(d3){ window.d3 = d3; $.getScript("{{ mpld3_url }}", function(){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }); }); } </script> """) # General HTML template. This should work correctly whether or not requirejs # is defined, and whether it's embedded in a notebook or in a standalone # HTML page. 
GENERAL_HTML = jinja2.Template(""" <style> {{ extra_css }} </style> <div id={{ figid }}></div> <script> function mpld3_load_lib(url, callback){ var s = document.createElement('script'); s.src = url; s.async = true; s.onreadystatechange = s.onload = callback; s.onerror = function(){console.warn("failed to load library " + url);}; document.getElementsByTagName("head")[0].appendChild(s); } if(typeof(mpld3) !== "undefined" && mpld3._mpld3IsLoaded){ // already loaded: just create the figure !function(mpld3){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }(mpld3); }else if(typeof define === "function" && define.amd){ // require.js is available: use it to load d3/mpld3 require.config({paths: {d3: "{{ d3_url[:-3] }}"}}); require(["d3"], function(d3){ window.d3 = d3; mpld3_load_lib("{{ mpld3_url }}", function(){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }); }); }else{ // require.js not available: dynamically load d3 & mpld3 mpld3_load_lib("{{ d3_url }}", function(){ mpld3_load_lib("{{ mpld3_url }}", function(){ {{ extra_js }} mpld3.draw_figure({{ figid }}, {{ figure_json }}); }) }); } </script> """) TEMPLATE_DICT = {"simple": SIMPLE_HTML, "notebook": REQUIREJS_HTML, "general": GENERAL_HTML} class NumpyEncoder(json.JSONEncoder): """ Special json encoder for numpy types """ def default(self, obj): if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint8, numpy.uint16,numpy.uint32, numpy.uint64)): return int(obj) elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32, numpy.float64)): return float(obj) return json.JSONEncoder.default(self, obj) def fig_to_dict(fig, **kwargs): """Output json-serializable dictionary representation of the figure Parameters ---------- fig : matplotlib figure The figure to display **kwargs : Additional keyword arguments passed to mplexporter.Exporter Returns ------- fig_dict : dict the Python dictionary representation of the figure, which is directly convertible to json using the standard json package. See Also -------- :func:`save_json`: save json representation of a figure to file :func:`save_html` : save html representation of a figure to file :func:`fig_to_html` : output html representation of the figure :func:`show` : launch a local server and show a figure in a browser :func:`display` : embed figure within the IPython notebook :func:`enable_notebook` : automatically embed figures in IPython notebook """ renderer = MPLD3Renderer() Exporter(renderer, close_mpl=False, **kwargs).run(fig) fig, figure_dict, extra_css, extra_js = renderer.finished_figures[0] return figure_dict def fig_to_html(fig, d3_url=None, mpld3_url=None, no_extras=False, template_type="general", figid=None, use_http=False, **kwargs): """Output html representation of the figure Parameters ---------- fig : matplotlib figure The figure to display d3_url : string (optional) The URL of the d3 library. If not specified, a standard web path will be used. mpld3_url : string (optional) The URL of the mpld3 library. If not specified, a standard web path will be used. no_extras : boolean If true, remove any extra javascript or CSS. The output will be similar to that if the representation output by fig_to_json is embedded in a web page. template_type : string string specifying the type of HTML template to use. Options are: ``"simple"`` suitable for a simple html page with one figure. Will fail if require.js is available on the page. ``"notebook"`` assumes require.js and jquery are available. 
``"general"`` more complicated, but works both in and out of the notebook, whether or not require.js and jquery are available figid : string (optional) The html/css id of the figure div, which must not contain spaces. If not specified, a random id will be generated. use_http : boolean (optional) If true, use http:// instead of https:// for d3_url and mpld3_url. **kwargs : Additional keyword arguments passed to mplexporter.Exporter Returns ------- fig_html : string the HTML representation of the figure See Also -------- :func:`save_json`: save json representation of a figure to file :func:`save_html` : save html representation of a figure to file :func:`fig_to_dict` : output dictionary representation of the figure :func:`show` : launch a local server and show a figure in a browser :func:`display` : embed figure within the IPython notebook :func:`enable_notebook` : automatically embed figures in IPython notebook """ template = TEMPLATE_DICT[template_type] # TODO: allow fig to be a list of figures? d3_url = d3_url or urls.D3_URL mpld3_url = mpld3_url or urls.MPLD3_URL if use_http: d3_url = d3_url.replace('https://', 'http://') mpld3_url = mpld3_url.replace('https://', 'http://') if figid is None: figid = 'fig_' + get_id(fig) + str(int(random.random() * 1E10)) elif re.search('\s', figid): raise ValueError("figid must not contain spaces") renderer = MPLD3Renderer() Exporter(renderer, close_mpl=False, **kwargs).run(fig) fig, figure_json, extra_css, extra_js = renderer.finished_figures[0] if no_extras: extra_css = "" extra_js = "" return template.render(figid=json.dumps(figid), d3_url=d3_url, mpld3_url=mpld3_url, figure_json=json.dumps(figure_json, cls=NumpyEncoder), extra_css=extra_css, extra_js=extra_js) def display(fig=None, closefig=True, local=False, **kwargs): """Display figure in IPython notebook via the HTML display hook Parameters ---------- fig : matplotlib figure The figure to display (grabs current figure if missing) closefig : boolean (default: True) If true, close the figure so that the IPython matplotlib mode will not display the png version of the figure. local : boolean (optional, default=False) if True, then copy the d3 & mpld3 libraries to a location visible to the notebook server, and source them from there. See Notes below. **kwargs : additional keyword arguments are passed through to :func:`fig_to_html`. Returns ------- fig_d3 : IPython.display.HTML object the IPython HTML rich display of the figure. Notes ----- Known issues: using ``local=True`` may not work correctly in certain cases: - In IPython < 2.0, ``local=True`` may fail if the current working directory is changed within the notebook (e.g. with the %cd command). - In IPython 2.0+, ``local=True`` may fail if a url prefix is added (e.g. by setting NotebookApp.base_url). 
See Also -------- :func:`show` : launch a local server and show a figure in a browser :func:`enable_notebook` : automatically embed figures in IPython notebook """ # import here, in case users don't have requirements installed from IPython.display import HTML import matplotlib.pyplot as plt if local: if 'mpld3_url' in kwargs or 'd3_url' in kwargs: warnings.warn( "display: specified urls are ignored when local=True") kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js() if fig is None: fig = plt.gcf() if closefig: plt.close(fig) return HTML(fig_to_html(fig, **kwargs)) def show(fig=None, ip='127.0.0.1', port=8888, n_retries=50, local=True, open_browser=True, http_server=None, **kwargs): """Open figure in a web browser Similar behavior to plt.show(). This opens the D3 visualization of the specified figure in the web browser. On most platforms, the browser will open automatically. Parameters ---------- fig : matplotlib figure The figure to display. If not specified, the current active figure will be used. ip : string, default = '127.0.0.1' the ip address used for the local server port : int, default = 8888 the port number to use for the local server. If already in use, a nearby open port will be found (see n_retries) n_retries : int, default = 50 the maximum number of ports to try when locating an empty port. local : bool, default = True if True, use the local d3 & mpld3 javascript versions, within the js/ folder. If False, use the standard urls. open_browser : bool (optional) if True (default), then open a web browser to the given HTML http_server : class (optional) optionally specify an HTTPServer class to use for showing the figure. The default is Python's basic HTTPServer. **kwargs : additional keyword arguments are passed through to :func:`fig_to_html` See Also -------- :func:`display` : embed figure within the IPython notebook :func:`enable_notebook` : automatically embed figures in IPython notebook """ if local: kwargs['mpld3_url'] = '/mpld3.js' kwargs['d3_url'] = '/d3.js' files = {'/mpld3.js': ["text/javascript", open(urls.MPLD3_LOCAL, 'r').read()], '/d3.js': ["text/javascript", open(urls.D3_LOCAL, 'r').read()]} else: files = None if fig is None: # import here, in case matplotlib.use(...) is called by user import matplotlib.pyplot as plt fig = plt.gcf() html = fig_to_html(fig, **kwargs) serve(html, ip=ip, port=port, n_retries=n_retries, files=files, open_browser=open_browser, http_server=http_server) def enable_notebook(local=False, **kwargs): """Enable the automatic display of figures in the IPython Notebook. This function should be used with the inline Matplotlib backend that ships with IPython that can be enabled with `%pylab inline` or `%matplotlib inline`. This works by adding an HTML formatter for Figure objects; the existing SVG/PNG formatters will remain enabled. Parameters ---------- local : boolean (optional, default=False) if True, then copy the d3 & mpld3 libraries to a location visible to the notebook server, and source them from there. See Notes below. **kwargs : all keyword parameters are passed through to :func:`fig_to_html` Notes ----- Known issues: using ``local=True`` may not work correctly in certain cases: - In IPython < 2.0, ``local=True`` may fail if the current working directory is changed within the notebook (e.g. with the %cd command). - In IPython 2.0+, ``local=True`` may fail if a url prefix is added (e.g. by setting NotebookApp.base_url). 
See Also -------- :func:`disable_notebook` : undo the action of enable_notebook :func:`display` : embed figure within the IPython notebook :func:`show` : launch a local server and show a figure in a browser """ try: from IPython.core.getipython import get_ipython from matplotlib.figure import Figure except ImportError: raise ImportError('This feature requires IPython 1.0+ and Matplotlib') if local: if 'mpld3_url' in kwargs or 'd3_url' in kwargs: warnings.warn( "enable_notebook: specified urls are ignored when local=True") kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js() ip = get_ipython() formatter = ip.display_formatter.formatters['text/html'] formatter.for_type(Figure, lambda fig, kwds=kwargs: fig_to_html(fig, **kwds)) def disable_notebook(): """Disable the automatic display of figures in the IPython Notebook. See Also -------- :func:`enable_notebook` : automatically embed figures in IPython notebook """ try: from IPython.core.getipython import get_ipython from matplotlib.figure import Figure except ImportError: raise ImportError('This feature requires IPython 1.0+ and Matplotlib') ip = get_ipython() formatter = ip.display_formatter.formatters['text/html'] formatter.type_printers.pop(Figure, None) def save_html(fig, fileobj, **kwargs): """Save a matplotlib figure to an html file Parameters ---------- fig : matplotlib Figure instance The figure to write to file. fileobj : filename or file object The filename or file-like object in which to write the HTML representation of the figure. **kwargs : additional keyword arguments will be passed to :func:`fig_to_html` See Also -------- :func:`save_json`: save json representation of a figure to file :func:`fig_to_html` : output html representation of the figure :func:`fig_to_dict` : output dictionary representation of the figure """ if isinstance(fileobj, str): fileobj = open(fileobj, 'w') if not hasattr(fileobj, 'write'): raise ValueError("fileobj should be a filename or a writable file") fileobj.write(fig_to_html(fig, **kwargs)) def save_json(fig, fileobj, **kwargs): """Save a matplotlib figure to a json file. Note that any plugins which depend on generated HTML will not be included in the JSON encoding. Parameters ---------- fig : matplotlib Figure instance The figure to write to file. fileobj : filename or file object The filename or file-like object in which to write the HTML representation of the figure. **kwargs : additional keyword arguments will be passed to :func:`fig_to_dict` See Also -------- :func:`save_html` : save html representation of a figure to file :func:`fig_to_html` : output html representation of the figure :func:`fig_to_dict` : output dictionary representation of the figure """ if isinstance(fileobj, str): fileobj = open(fileobj, 'w') if not hasattr(fileobj, 'write'): raise ValueError("fileobj should be a filename or a writable file") json.dump(fig_to_dict(fig, **kwargs), fileobj) # Deprecated versions of these functions show_d3 = deprecated(show, "mpld3.show_d3", "mpld3.show") fig_to_d3 = deprecated(fig_to_html, "mpld3.fig_to_d3", "mpld3.fig_to_html") display_d3 = deprecated(display, "mpld3.display_d3", "mpld3.display")
bsd-3-clause
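A brief usage sketch for the mpld3 helpers defined above; the figure content and the 'figure.html' output path are arbitrary choices for illustration, not part of the module.

# Sketch: turning a matplotlib figure into embeddable HTML with the helpers above.
import matplotlib.pyplot as plt
import mpld3

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [3, 1, 2], marker='o')

html = mpld3.fig_to_html(fig, template_type='general')  # standalone HTML string
mpld3.save_html(fig, 'figure.html')                     # same content written to disk
# mpld3.show(fig)  # would serve the figure from a local HTTP server instead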
james-nichols/dtrw
pathwise_paper/pathwise_dtrw_vary_t.py
1
2045
#!/usr/local/bin/python3 # Libraries are in parent directory import sys sys.path.append('../') import sys import time import math import random import numpy as np import scipy import matplotlib # Force matplotlib to not use any Xwindows backend. matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns from dtrw import * import pdb from pathwise_dtrw import * ###################### # Compare approaches # ###################### if __name__ == "__main__": # Number of MC points N = int(1e6) alpha = 0.7 D_alpha = 0.1 dX = 2.e-2 end_points = [-1., 1.] num_points = int((end_points[1]-end_points[0])/dX) xs = np.linspace(end_points[0] + 0.5 * dX, end_points[1] - 0.5 * dX, num_points) dtrw_times = [] mc_times = [] dtrw_solns = [] mc_solns = [] dtrw_diffs = [] mc_diffs = [] sns.set_style("whitegrid") Ts = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0] for T in Ts: dT = pow((dX*dX/ (2.0 * D_alpha)), 1. / alpha) num_t = int(T / dT) T_grid = dT * num_t print(T, '\t', num_t * dT, ',\t', num_t, 'x', num_points, 'grid') mc_start = time.clock() mc_soln, n = mc_solve(xs, N, num_t, alpha) mc_end = time.clock() dtrw_soln = dtrw_solve(xs, num_t, alpha) dtrw_end = time.clock() mc_times.append(mc_end - mc_start) dtrw_times.append(dtrw_end - mc_end) c = sns.color_palette("deep", 10) f, ax = plt.subplots()#figsize=(4, 3)) ax.set(yscale="log") plt.plot(Ts, mc_times, 's', markerfacecolor='none', mew=2, markersize=10, mec=c[1], label='Monte Carlo') plt.plot(Ts, dtrw_times, 'o', markerfacecolor='none', mew=2, markersize=10, mec=c[2], label='DTRW') plt.xlabel('T') plt.ylabel('Computation time, log scale') plt.legend() plt.savefig('dtrw_mc_T_comp_time_{0}.pdf'.format(N)) np.savetxt('mc_times_vary_T_{0}.csv'.format(N), mc_times) np.savetxt('dtrw_times_vary_T_{0}.csv'.format(N), dtrw_times)
gpl-2.0

SimonHL/TSA
LSTM.py
1
5715
# -*- coding: utf-8 -*-
from collections import OrderedDict

import numpy
import theano
import theano.tensor as T
import time
import sys
import matplotlib.pyplot as plt
import copy

import utilities.datagenerator as DG
reload(DG)

compile_mode = 'FAST_COMPILE'
# compile_mode = 'FAST_RUN'

# Set the random number generators' seeds for consistency
SEED = 100
numpy.random.seed(SEED)

def lstm_layer(n_input, n_LSTM, x):
    '''
    i, f, o and c are handled together in one concatenated preactivation.
    '''
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n * dim:(n + 1) * dim]
        return _x[:, n * dim:(n + 1) * dim]

    def _step(*args):
        '''
        x_ : the time-delayed inputs x
        h_ : output of the unit at the previous time step
        c_ : cell value of the unit at the previous time step
        '''
        x = [args[u] for u in xrange(n_input)]
        h_ = args[n_input]
        c_ = args[n_input+1]

        preact = T.dot(x[0], W_in[0])
        for i in xrange(1, n_input):
            preact += T.dot(x[i], W_in[i])
        preact += T.dot(h_, W_hid)  # recurrent contribution of h
        preact += b_in

        i = T.nnet.sigmoid(_slice(preact, 0, n_LSTM))  # input gate
        f = T.nnet.sigmoid(_slice(preact, 1, n_LSTM))  # forget gate
        o = T.nnet.sigmoid(_slice(preact, 2, n_LSTM))  # output gate
        c = T.tanh(_slice(preact, 3, n_LSTM))          # cell state pre

        c = f * c_ + i * c   # cell state
        h = o * T.tanh(c)    # unit output

        return h, c

    out_h = theano.shared(numpy.zeros((1, n_LSTM), dtype=theano.config.floatX), name="out_h")
    out_c = theano.shared(numpy.zeros((1, n_LSTM), dtype=theano.config.floatX), name="out_c")

    input_taps = range(1-n_input, 1)
    rval, updates = theano.scan(_step,
                                sequences=dict(input=x, taps=input_taps),
                                outputs_info=[out_h, out_c])
    return rval[0]  # only h is exposed to the caller

# network parameters
n_input = 7
n_hidden = 5
n_output = 1
n_epochs = 20

dtype = theano.config.floatX
theano.config.exception_verbosity = 'low'

# load the data to process
g = DG.Generator()
data_x, data_y = g.get_data('mackey_glass')

index_test_begin = data_y.shape[0] / 2
train_data_index = numpy.arange(index_test_begin)
test_data_index = numpy.arange(index_test_begin, data_y.shape[0])
train_data = data_y[train_data_index]
test_data = data_y[test_data_index]

print 'train_data.shape: ', train_data.shape
print 'test_data.shape: ', test_data.shape
print 'network: n_in:{},n_hidden:{},n_out:{}'.format(n_input, n_hidden, n_output)

x = T.vector()  # input vector, the first dimension is time
y = T.vector()  # output vector, the first dimension is time

W_in = [theano.shared(numpy.random.uniform(size=(1, 4*n_hidden), low=-0.01, high=0.01).astype(dtype),
                      name='W_in' + str(u)) for u in range(n_input)]
b_in = theano.shared(numpy.zeros((4 * n_hidden,), dtype=dtype), name="b_in")
W_hid = theano.shared(numpy.random.uniform(size=(n_hidden, 4*n_hidden), low=-0.01, high=0.01).astype(dtype),
                      name='W_hid')
W_out = theano.shared(numpy.random.uniform(size=(n_hidden, n_output), low=-0.01, high=0.01).astype(dtype),
                      name="W_out")
b_out = theano.shared(numpy.zeros((n_output,), dtype=dtype), name="b_out")

params = []
params.extend(W_in)
params.extend([b_in])
params.extend([W_hid])
params.extend([W_out])
params.extend([b_out])

h_tmp = lstm_layer(n_input, n_hidden, x)
pred = T.dot(h_tmp, W_out) + b_out
pred = theano.tensor.flatten(pred)
f_pred = theano.function([x], pred, name='f_pred')

cost = ((pred - y)**2).sum()

batch_size = 400  # when set large enough this is equivalent to plain gradient descent
print 'Batch Size: ', batch_size

grads = theano.tensor.grad(cost, wrt=params)

tparams = OrderedDict()
for p in params:
    tparams[p.name] = p

lr_v = 0.0001
lr_ada = theano.tensor.scalar(name='lr_ada')
updates_1, updates_2, f_grad_shared, f_update = DG.PublicFunction.adadelta(lr_ada, tparams, grads, [x, y], cost)

sim_fn = theano.function([x], outputs=pred)

start_time = time.clock()
for epochs_index in xrange(n_epochs):
    kf = DG.DataPrepare.get_seq_minibatches_idx(train_data.shape[0], batch_size, n_input, shuffle=False)
    for batch_index, train_index in kf:
        sub_seq = train_data[train_index]
        _x, _y = DG.PublicFunction.data_get_data_x_y(sub_seq, n_input)
        train_err = f_grad_shared(_x, _y)
        f_update(lr_v)
        print '{}.{}: cost={}'.format(epochs_index, batch_index, train_err)

x_train_end = copy.deepcopy(train_data[-n_input:])
n_predict = 100
y_predict = numpy.zeros((n_predict,))
cumulative_error = 0
cumulative_error_list = numpy.zeros((n_predict,))
for i in numpy.arange(n_predict):
    y_predict[i] = sim_fn(x_train_end)
    x_train_end[:-1] = x_train_end[1:]
    x_train_end[-1] = y_predict[i]
    cumulative_error += numpy.abs(y_predict[i] - test_data[i])
    cumulative_error_list[i] = cumulative_error

plt.figure(3)
plt.plot(numpy.arange(n_predict), cumulative_error_list)
plt.title('cumulative error')
plt.grid(True)

plt.figure(1)
plt.plot(numpy.arange(y_predict.shape[0]), y_predict, 'r')
plt.plot(numpy.arange(300), test_data[:300], 'g')

y_sim = sim_fn(data_x[:-1])  # one-step-ahead error over the whole series
print 'y_sim.shape: ', y_sim.shape

plt.figure(2)
plt.plot(range(data_y.shape[0]), data_y, 'k')
plt.plot(range(data_y.shape[0]-y_sim.shape[0], data_y.shape[0]), y_sim, 'r')
plt.plot(range(data_y.shape[0]-y_sim.shape[0], data_y.shape[0]), y_sim - data_y[n_input:], 'g')

print >> sys.stderr, ('overall time (%.5fs)' % ((time.clock() - start_time) / 1.))
plt.show()
print "finished!"
bsd-2-clause
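The gate arithmetic inside _step above packs the input, forget and output gates plus the candidate cell state into one preactivation of width 4*n_hidden. A plain-numpy sketch of that slicing (not Theano, and not part of the original script) may make the layout easier to follow.

# Illustration of the gate layout assumed by _slice above: the concatenated
# preactivation holds i, f, o and the candidate cell state, in that order.
import numpy as np

n_hidden = 5
preact = np.random.randn(1, 4 * n_hidden)

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def _slice(x, n, dim):
    return x[:, n * dim:(n + 1) * dim]

i = sigmoid(_slice(preact, 0, n_hidden))      # input gate
f = sigmoid(_slice(preact, 1, n_hidden))      # forget gate
o = sigmoid(_slice(preact, 2, n_hidden))      # output gate
c_pre = np.tanh(_slice(preact, 3, n_hidden))  # candidate cell state

c_prev = np.zeros((1, n_hidden))
c = f * c_prev + i * c_pre   # new cell state
h = o * np.tanh(c)           # unit output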
shaunstanislaus/pandashells
pandashells/test/module_checker_lib_tests.py
7
1443
#! /usr/bin/env python
from unittest import TestCase

from pandashells.lib.module_checker_lib import check_for_modules
from pandashells.lib import module_checker_lib

from mock import patch


class ModuleCheckerTests(TestCase):
    def setUp(self):
        module_checker_lib.CMD_DICT['fakemodule1'] = 'pip install fakemodule1'
        module_checker_lib.CMD_DICT['fakemodule2'] = 'pip install fakemodule2'
        module_checker_lib.CMD_DICT['os'] = 'part of standard module'

    def test_check_for_modules_unrecognized(self):
        """
        check_for_modules() raises error when module is unrecognized
        """
        with self.assertRaises(ValueError):
            check_for_modules(['not_a_module'])

    @patch('pandashells.lib.module_checker_lib.importlib.import_module')
    def test_check_for_modules_no_modules(self, import_module_mock):
        """
        check_for_modules() does nothing when module list is empty
        """
        check_for_modules([])
        self.assertFalse(import_module_mock.called)

    def test_check_for_modules_existing_module(self):
        """
        check_for_modules() successfully finds existing module
        """
        check_for_modules(['os'])

    def test_check_for_modules_bad(self):
        """
        check_for_modules() correctly identifies missing modules
        """
        with self.assertRaises(ImportError):
            check_for_modules(['fakemodule1', 'fakemodule2'])
bsd-2-clause
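The tests above pin down the expected behaviour of check_for_modules without showing it. The sketch below is not the pandashells implementation, only a hypothetical checker with the same contract: unknown names raise ValueError, known-but-missing modules raise ImportError carrying the install hint from CMD_DICT.

# Hypothetical checker matching the behaviour the tests above expect.
import importlib

CMD_DICT = {'os': 'part of standard module',
            'fakemodule1': 'pip install fakemodule1'}

def check_for_modules(module_list):
    unknown = [m for m in module_list if m not in CMD_DICT]
    if unknown:
        raise ValueError('Unrecognized module(s): {}'.format(unknown))
    missing = []
    for name in module_list:
        try:
            importlib.import_module(name)
        except ImportError:
            missing.append(name)
    if missing:
        hints = '\n'.join(CMD_DICT[m] for m in missing)
        raise ImportError('Missing module(s), try:\n{}'.format(hints))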
hainm/scikit-learn
sklearn/__init__.py
154
3014
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.17.dev0' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
bsd-3-clause
Windy-Ground/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
127
7477
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
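The minimum-dimension bound quoted in the example's docstring can be evaluated directly. The sketch below (illustrative sample sizes; the hand-coded value may differ from sklearn's helper by integer rounding) compares a direct evaluation of the formula with johnson_lindenstrauss_min_dim.

# Direct evaluation of n_components >= 4 log(n_samples) / (eps**2/2 - eps**3/3),
# checked against sklearn's helper for a few illustrative (n_samples, eps) pairs.
import numpy as np
from sklearn.random_projection import johnson_lindenstrauss_min_dim

def jl_min_dim(n_samples, eps):
    return int(np.ceil(4 * np.log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3)))

for n, eps in [(500, 0.1), (500, 0.5), (int(1e6), 0.3)]:
    print(n, eps, jl_min_dim(n, eps), johnson_lindenstrauss_min_dim(n, eps=eps))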
pelson/cartopy
lib/cartopy/examples/regridding_arrows.py
4
1656
""" Regridding vectors with quiver ------------------------------ This example demonstrates the regridding functionality in quiver (there exists equivalent functionality in :meth:`cartopy.mpl.geoaxes.GeoAxes.barbs`). Regridding can be an effective way of visualising a vector field, particularly if the data is dense or warped. """ __tags__ = ['Vector data'] import matplotlib.pyplot as plt import numpy as np import cartopy.crs as ccrs def sample_data(shape=(20, 30)): """ Return ``(x, y, u, v, crs)`` of some vector data computed mathematically. The returned CRS will be a North Polar Stereographic projection, meaning that the vectors will be unevenly spaced in a PlateCarree projection. """ crs = ccrs.NorthPolarStereo() scale = 1e7 x = np.linspace(-scale, scale, shape[1]) y = np.linspace(-scale, scale, shape[0]) x2d, y2d = np.meshgrid(x, y) u = 10 * np.cos(2 * x2d / scale + 3 * y2d / scale) v = 20 * np.cos(6 * x2d / scale) return x, y, u, v, crs def main(): fig = plt.figure(figsize=(8, 10)) x, y, u, v, vector_crs = sample_data(shape=(50, 50)) ax1 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree()) ax1.coastlines('50m') ax1.set_extent([-45, 55, 20, 80], ccrs.PlateCarree()) ax1.quiver(x, y, u, v, transform=vector_crs) ax2 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree()) ax2.set_title('The same vector field regridded') ax2.coastlines('50m') ax2.set_extent([-45, 55, 20, 80], ccrs.PlateCarree()) ax2.quiver(x, y, u, v, transform=vector_crs, regrid_shape=20) plt.show() if __name__ == '__main__': main()
lgpl-3.0
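As the example's docstring notes, the same regridding is available for wind barbs. The sketch below assumes the example module above is importable as regridding_arrows so its sample_data helper can be reused; apart from that assumption it simply mirrors the quiver call with GeoAxes.barbs.

# Sketch: the regrid_shape keyword with barbs instead of quiver.
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from regridding_arrows import sample_data  # assumed importable from the example above

x, y, u, v, vector_crs = sample_data(shape=(50, 50))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines('50m')
ax.set_extent([-45, 55, 20, 80], ccrs.PlateCarree())
ax.barbs(x, y, u, v, transform=vector_crs, regrid_shape=20, length=4)
plt.show()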
Neuroglycerin/neukrill-net-work
generate_local_cache.py
1
2718
#!/usr/bin/env python

import sys
import time

import numpy as np

import sklearn
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.pipeline
import sklearn.feature_selection
import sklearn.grid_search
from sklearn.externals import joblib

import neukrill_net.utils
import neukrill_net.image_features
import neukrill_net.highlevelfeatures
import neukrill_net.stacked

# Define output path
train_pkl_path = '/disk/data1/s1145806/cached_kpec_train_data_raw.pkl'
test_pkl_path = '/disk/data1/s1145806/cached_kpec_test_data_raw.pkl'

t0 = time.time()

print "Setup..."

max_num_kp_orb = 200
max_num_kp_fast = 400
max_num_kp_mser = 200

settings = neukrill_net.utils.Settings('settings.json')
X, y = settings.flattened_train_paths(settings.classes)
X_test = settings.image_fnames['test']

X = np.array(X)
y = np.array(y)

detector_list = [lambda image: neukrill_net.image_features.get_ORB_keypoints(image, n=max_num_kp_orb, patchSize=9),
                 lambda image: neukrill_net.image_features.get_FAST_keypoints(image, n=max_num_kp_fast),
                 lambda image: neukrill_net.image_features.get_MSER_keypoints(image, n=max_num_kp_mser)]

describer_list = [neukrill_net.image_features.get_ORB_descriptions,
                  neukrill_net.image_features.get_ORB_descriptions,
                  neukrill_net.image_features.get_ORB_descriptions]

kprf_base = sklearn.ensemble.RandomForestClassifier(n_estimators=1000, max_depth=15, min_samples_leaf=20,
                                                    n_jobs=16, random_state=42)

hlf_list = []
for index, detector in enumerate(detector_list):
    hlf_list += [neukrill_net.highlevelfeatures.KeypointEnsembleClassifier(detector, describer_list[index], kprf_base,
                                                                           return_num_kp=True, n_jobs=0, verbosity=1,
                                                                           summary_method='vote')]

hlf = neukrill_net.highlevelfeatures.MultiHighLevelFeature(hlf_list)

# Partition the data
print "Partitioning the training data"

# Remove the data which is going to be held out
li_test = neukrill_net.utils.train_test_split_bool(settings.image_fnames, 'test', train_split=0.8,
                                                   classes=settings.classes)
li_nottest = np.logical_not(li_test)
indices_nottest = np.where(li_nottest)[0]

# Split the remaining data
inner, outer = sklearn.cross_validation.train_test_split(indices_nottest, test_size=0.25, random_state=42)

print "Fitting keypoint predictions"
hlf.fit(X[inner], y[inner])

print "Transforming training data"
XF_train = hlf.transform(X)

print "Saving train cache"
joblib.dump(XF_train, train_pkl_path)

print "Transforming test data"
XF_test = hlf.transform(X_test)

print "Saving test cache"
joblib.dump(XF_test, test_pkl_path)

# joblib.dump(hlf, '/disk/data1/s1145806/kpec_cache/keypoint_transformer.pkl')

print "Took {} seconds".format(time.time() - t0)
mit
Hiyorimi/scikit-image
skimage/io/tests/test_mpl_imshow.py
7
4056
from __future__ import division import numpy as np from skimage import io from skimage._shared._warnings import expected_warnings import matplotlib.pyplot as plt def setup(): io.reset_plugins() # test images. Note that they don't have their full range for their dtype, # but we still expect the display range to equal the full dtype range. im8 = np.array([[0, 64], [128, 240]], np.uint8) im16 = im8.astype(np.uint16) * 256 im64 = im8.astype(np.uint64) imf = im8 / 255 im_lo = imf / 1000 im_hi = imf + 10 def n_subplots(ax_im): """Return the number of subplots in the figure containing an ``AxesImage``. Parameters ---------- ax_im : matplotlib.pyplot.AxesImage object The input ``AxesImage``. Returns ------- n : int The number of subplots in the corresponding figure. Notes ----- This function is intended to check whether a colorbar was drawn, in which case two subplots are expected. For standard imshows, one subplot is expected. """ return len(ax_im.get_figure().get_axes()) def test_uint8(): plt.figure() with expected_warnings(["tight_layout : falling back to Agg|\A\Z", "CObject type is marked|\A\Z"]): ax_im = io.imshow(im8) assert ax_im.cmap.name == 'gray' assert ax_im.get_clim() == (0, 255) assert n_subplots(ax_im) == 1 assert ax_im.colorbar is None def test_uint16(): plt.figure() with expected_warnings(["tight_layout : falling back to Agg|\A\Z", "CObject type is marked|\A\Z"]): ax_im = io.imshow(im16) assert ax_im.cmap.name == 'gray' assert ax_im.get_clim() == (0, 65535) assert n_subplots(ax_im) == 1 assert ax_im.colorbar is None def test_float(): plt.figure() with expected_warnings(["tight_layout : falling back to Agg|\A\Z", "CObject type is marked|\A\Z"]): ax_im = io.imshow(imf) assert ax_im.cmap.name == 'gray' assert ax_im.get_clim() == (0, 1) assert n_subplots(ax_im) == 1 assert ax_im.colorbar is None def test_low_dynamic_range(): with expected_warnings(["Low image dynamic range|CObject type is marked", "tight_layout : falling back to Agg|\A\Z"]): ax_im = io.imshow(im_lo) assert ax_im.get_clim() == (im_lo.min(), im_lo.max()) # check that a colorbar was created assert ax_im.colorbar is not None def test_outside_standard_range(): plt.figure() # Warning raised by matplotlib on Windows: # "The CObject type is marked Pending Deprecation in Python 2.7. # Please use capsule objects instead." # Ref: https://docs.python.org/2/c-api/cobject.html with expected_warnings(["out of standard range|CObject type is marked", "tight_layout : falling back to Agg|\A\Z"]): ax_im = io.imshow(im_hi) assert ax_im.get_clim() == (im_hi.min(), im_hi.max()) assert n_subplots(ax_im) == 2 assert ax_im.colorbar is not None def test_nonstandard_type(): plt.figure() # Warning raised by matplotlib on Windows: # "The CObject type is marked Pending Deprecation in Python 2.7. # Please use capsule objects instead." # Ref: https://docs.python.org/2/c-api/cobject.html with expected_warnings(["Low image dynamic range|CObject type is marked", "tight_layout : falling back to Agg|\A\Z"]): ax_im = io.imshow(im64) assert ax_im.get_clim() == (im64.min(), im64.max()) assert n_subplots(ax_im) == 2 assert ax_im.colorbar is not None def test_signed_image(): plt.figure() im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]]) with expected_warnings(["tight_layout : falling back to Agg|\A\Z", "CObject type is marked|\A\Z"]): ax_im = io.imshow(im_signed) assert ax_im.get_clim() == (-0.5, 0.5) assert n_subplots(ax_im) == 2 assert ax_im.colorbar is not None if __name__ == '__main__': np.testing.run_module_suite()
bsd-3-clause
mattilyra/scikit-learn
sklearn/utils/random.py
37
10511
# Author: Hamzeh Alsalhi <[email protected]> # # License: BSD 3 clause from __future__ import division import numpy as np import scipy.sparse as sp import operator import array from sklearn.utils import check_random_state from sklearn.utils.fixes import astype from ._random import sample_without_replacement __all__ = ['sample_without_replacement', 'choice'] # This is a backport of np.random.choice from numpy 1.7 # The function can be removed when we bump the requirements to >=1.7 def choice(a, size=None, replace=True, p=None, random_state=None): """ choice(a, size=None, replace=True, p=None) Generates a random sample from a given 1-D array .. versionadded:: 1.7.0 Parameters ----------- a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. If an int, the random sample is generated as if a was np.arange(n) size : int or tuple of ints, optional Output shape. Default is None, in which case a single value is returned. replace : boolean, optional Whether the sample is with or without replacement. p : 1-D array-like, optional The probabilities associated with each entry in a. If not given the sample assumes a uniform distribution over all entries in a. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns -------- samples : 1-D ndarray, shape (size,) The generated random samples Raises ------- ValueError If a is an int and less than zero, if a or p are not 1-dimensional, if a is an array-like of size 0, if p is not a vector of probabilities, if a and p have different lengths, or if replace=False and the sample size is greater than the population size See Also --------- randint, shuffle, permutation Examples --------- Generate a uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3) # doctest: +SKIP array([0, 3, 4]) >>> #This is equivalent to np.random.randint(0,5,3) Generate a non-uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP array([3, 3, 0]) Generate a uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False) # doctest: +SKIP array([3,1,0]) >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3] Generate a non-uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0]) ... # doctest: +SKIP array([2, 3, 0]) Any of the above can be repeated with an arbitrary array-like instead of just integers. For instance: >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher'] >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3]) ... # doctest: +SKIP array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], dtype='|S11') """ random_state = check_random_state(random_state) # Format and Verify input a = np.array(a, copy=False) if a.ndim == 0: try: # __index__ must return an integer by python rules. 
pop_size = operator.index(a.item()) except TypeError: raise ValueError("a must be 1-dimensional or an integer") if pop_size <= 0: raise ValueError("a must be greater than 0") elif a.ndim != 1: raise ValueError("a must be 1-dimensional") else: pop_size = a.shape[0] if pop_size is 0: raise ValueError("a must be non-empty") if None != p: p = np.array(p, dtype=np.double, ndmin=1, copy=False) if p.ndim != 1: raise ValueError("p must be 1-dimensional") if p.size != pop_size: raise ValueError("a and p must have same size") if np.any(p < 0): raise ValueError("probabilities are not non-negative") if not np.allclose(p.sum(), 1): raise ValueError("probabilities do not sum to 1") shape = size if shape is not None: size = np.prod(shape, dtype=np.intp) else: size = 1 # Actual sampling if replace: if None != p: cdf = p.cumsum() cdf /= cdf[-1] uniform_samples = random_state.random_sample(shape) idx = cdf.searchsorted(uniform_samples, side='right') # searchsorted returns a scalar idx = np.array(idx, copy=False) else: idx = random_state.randint(0, pop_size, size=shape) else: if size > pop_size: raise ValueError("Cannot take a larger sample than " "population when 'replace=False'") if None != p: if np.sum(p > 0) < size: raise ValueError("Fewer non-zero entries in p than size") n_uniq = 0 p = p.copy() found = np.zeros(shape, dtype=np.int) flat_found = found.ravel() while n_uniq < size: x = random_state.rand(size - n_uniq) if n_uniq > 0: p[flat_found[0:n_uniq]] = 0 cdf = np.cumsum(p) cdf /= cdf[-1] new = cdf.searchsorted(x, side='right') _, unique_indices = np.unique(new, return_index=True) unique_indices.sort() new = new.take(unique_indices) flat_found[n_uniq:n_uniq + new.size] = new n_uniq += new.size idx = found else: idx = random_state.permutation(pop_size)[:size] if shape is not None: idx.shape = shape if shape is None and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array idx = idx.item(0) # Use samples as indices for a if a is array-like if a.ndim == 0: return idx if shape is not None and idx.ndim == 0: # If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an # array, taking into account that np.array(item) may not work # for object arrays. res = np.empty((), dtype=a.dtype) res[()] = a[idx] return res return a[idx] def random_choice_csc(n_samples, classes, class_probability=None, random_state=None): """Generate a sparse random matrix given column class distributions Parameters ---------- n_samples : int, Number of samples to draw in each column. classes : list of size n_outputs of arrays of size (n_classes,) List of classes for each column. class_probability : list of size n_outputs of arrays of size (n_classes,) Optional (default=None). Class distribution of each column. If None the uniform distribution is assumed. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        if np.sum(class_prob_j) != 1.0:
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
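

# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the upstream module).
# It exercises only the two public helpers defined above -- `choice` and
# `random_choice_csc` -- with toy class labels and probability values invented
# purely for illustration.  The `__main__` guard keeps it from running on
# import.
if __name__ == "__main__":
    rng = check_random_state(0)

    # Backported `choice`: draw 3 distinct indices from range(5) using a
    # non-uniform probability vector.
    print(choice(5, size=3, replace=False,
                 p=[0.1, 0.2, 0.3, 0.4, 0.0], random_state=rng))

    # `random_choice_csc`: two output columns, each with its own integer
    # class labels and class distribution; the result is a sparse CSC matrix
    # of shape (n_samples, n_outputs).
    Y = random_choice_csc(n_samples=10,
                          classes=[np.array([0, 1]), np.array([0, 2, 3])],
                          class_probability=[np.array([0.5, 0.5]),
                                             np.array([0.5, 0.25, 0.25])],
                          random_state=0)
    print(Y.toarray())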
bsd-3-clause
PatrickOReilly/scikit-learn
sklearn/cluster/k_means_.py
5
59442
"""K-means clustering""" # Authors: Gael Varoquaux <[email protected]> # Thomas Rueckstiess <[email protected]> # James Bergstra <[email protected]> # Jan Schlueter <[email protected]> # Nelle Varoquaux # Peter Prettenhofer <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Robert Layton <[email protected]> # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, ClusterMixin, TransformerMixin from ..metrics.pairwise import euclidean_distances from ..utils.extmath import row_norms, squared_norm from ..utils.sparsefuncs_fast import assign_rows_csr from ..utils.sparsefuncs import mean_variance_axis from ..utils.fixes import astype from ..utils import check_array from ..utils import check_random_state from ..utils import as_float_array from ..utils import gen_batches from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..utils.random import choice from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.six import string_types from . import _k_means from ._k_means_elkan import k_means_elkan ############################################################################### # Initialization heuristic def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None): """Init n_clusters seeds according to k-means++ Parameters ----------- X: array or sparse matrix, shape (n_samples, n_features) The data to pick seeds for. To avoid memory copy, the input data should be double precision (dtype=np.float64). n_clusters: integer The number of seeds to choose x_squared_norms: array, shape (n_samples,) Squared Euclidean norm of each data point. random_state: numpy.RandomState The generator used to initialize the centers. n_local_trials: integer, optional The number of seeding trials for each center (except the first), of which the one reducing inertia the most is greedily chosen. Set to None to make the number of trials depend logarithmically on the number of seeds (2+log(k)); this is the default. Notes ----- Selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. see: Arthur, D. and Vassilvitskii, S. "k-means++: the advantages of careful seeding". ACM-SIAM symposium on Discrete algorithms. 2007 Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip, which is the implementation used in the aforementioned paper. """ n_samples, n_features = X.shape centers = np.empty((n_clusters, n_features), dtype=X.dtype) assert x_squared_norms is not None, 'x_squared_norms None in _k_init' # Set the number of local seeding trials if none is given if n_local_trials is None: # This is what Arthur/Vassilvitskii tried, but did not report # specific results for other than mentioning in the conclusion # that it helped. 
n_local_trials = 2 + int(np.log(n_clusters)) # Pick first center randomly center_id = random_state.randint(n_samples) if sp.issparse(X): centers[0] = X[center_id].toarray() else: centers[0] = X[center_id] # Initialize list of closest distances and calculate current potential closest_dist_sq = euclidean_distances( centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True) current_pot = closest_dist_sq.sum() # Pick the remaining n_clusters-1 points for c in range(1, n_clusters): # Choose center candidates by sampling with probability proportional # to the squared distance to the closest existing center rand_vals = random_state.random_sample(n_local_trials) * current_pot candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals) # Compute distances to center candidates distance_to_candidates = euclidean_distances( X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True) # Decide which candidate is the best best_candidate = None best_pot = None best_dist_sq = None for trial in range(n_local_trials): # Compute potential when including center candidate new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidates[trial]) new_pot = new_dist_sq.sum() # Store result if it is the best local trial so far if (best_candidate is None) or (new_pot < best_pot): best_candidate = candidate_ids[trial] best_pot = new_pot best_dist_sq = new_dist_sq # Permanently add best center candidate found in local tries if sp.issparse(X): centers[c] = X[best_candidate].toarray() else: centers[c] = X[best_candidate] current_pot = best_pot closest_dist_sq = best_dist_sq return centers ############################################################################### # K-means batch estimation by EM (expectation maximization) def _validate_center_shape(X, n_centers, centers): """Check if centers is compatible with X and n_centers""" if len(centers) != n_centers: raise ValueError('The shape of the initial centers (%s) ' 'does not match the number of clusters %i' % (centers.shape, n_centers)) if centers.shape[1] != X.shape[1]: raise ValueError( "The number of features of the initial centers %s " "does not match the number of features of the data %s." % (centers.shape[1], X.shape[1])) def _tolerance(X, tol): """Return a tolerance which is independent of the dataset""" if sp.issparse(X): variances = mean_variance_axis(X, axis=0)[1] else: variances = np.var(X, axis=0) return np.mean(variances) * tol def k_means(X, n_clusters, init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False): """K-means clustering algorithm. Read more in the :ref:`User Guide <k_means>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The observations to cluster. n_clusters : int The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 
'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. algorithm : "auto", "full" or "elkan", default="auto" K-means algorithm to use. The classical EM-style algorithm is "full". The "elkan" variation is more efficient by using the triangle inequality, but currently doesn't support sparse data. "auto" chooses "elkan" for dense data and "full" for sparse data. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, optional The relative increment in the results before declaring convergence. verbose : boolean, optional Verbosity mode. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. copy_x : boolean, optional When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- centroid : float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label : integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). best_n_iter: int Number of iterations corresponding to the best results. Returned only if `return_n_iter` is set to True. """ if n_init <= 0: raise ValueError("Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError('Number of iterations should be a positive number,' ' got %d instead' % max_iter) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) # If the distances are precomputed every job will create a matrix of shape # (n_clusters, n_samples). To stop KMeans from eating up memory we only # activate this if the created matrix is guaranteed to be under 100MB. 12 # million entries consume a little under 100MB if they are of type double. 
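    # (Editor's note: 12e6 float64 entries * 8 bytes each = 96e6 bytes,
    # i.e. just under the 100MB budget mentioned above.)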
if precompute_distances == 'auto': n_samples = X.shape[0] precompute_distances = (n_clusters * n_samples) < 12e6 elif isinstance(precompute_distances, bool): pass else: raise ValueError("precompute_distances should be 'auto' or True/False" ", but a value of %r was passed" % precompute_distances) # subtract of mean of x for more accurate distance computations if not sp.issparse(X) or hasattr(init, '__array__'): X_mean = X.mean(axis=0) if not sp.issparse(X): # The copy was already done above X -= X_mean if hasattr(init, '__array__'): init = check_array(init, dtype=X.dtype.type, copy=True) _validate_center_shape(X, n_clusters, init) init -= X_mean if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in k-means instead of n_init=%d' % n_init, RuntimeWarning, stacklevel=2) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) best_labels, best_inertia, best_centers = None, None, None if n_clusters == 1: # elkan doesn't make sense for a single cluster, full will produce # the right result. algorithm = "full" if algorithm == "auto": algorithm = "full" if sp.issparse(X) else 'elkan' if algorithm == "full": kmeans_single = _kmeans_single_lloyd elif algorithm == "elkan": kmeans_single = _kmeans_single_elkan else: raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got" " %s" % str(algorithm)) if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = kmeans_single( X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, precompute_distances=precompute_distances, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, tol=tol, precompute_distances=precompute_distances, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed) for seed in seeds) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if not sp.issparse(X): if not copy_x: X += X_mean best_centers += X_mean if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++', verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True): if sp.issparse(X): raise ValueError("algorithm='elkan' not supported for sparse input X") X = check_array(X, order="C") random_state = check_random_state(random_state) if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) centers = np.ascontiguousarray(centers) if verbose: print('Initialization complete') centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol, max_iter=max_iter, 
verbose=verbose) inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64) return labels, inertia, centers, n_iter def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++', verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True): """A single run of k-means, assumes preparation completed prior. Parameters ---------- X: array-like of floats, shape (n_samples, n_features) The observations to cluster. n_clusters: int The number of clusters to form as well as the number of centroids to generate. max_iter: int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. init: {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (k, p) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. tol: float, optional The relative increment in the results before declaring convergence. verbose: boolean, optional Verbosity mode x_squared_norms: array Precomputed x_squared_norms. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- centroid: float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label: integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia: float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. 
""" random_state = check_random_state(random_state) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment is also called the E-step of EM labels, inertia = \ _labels_inertia(X, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances) # computation of the means is also called the M-step of EM if sp.issparse(X): centers = _k_means._centers_sparse(X, labels, n_clusters, distances) else: centers = _k_means._centers_dense(X, labels, n_clusters, distances) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia center_shift_total = squared_norm(centers_old - centers) if center_shift_total <= tol: if verbose: print("Converged at iteration %d: " "center shift %e within tolerance %e" % (i, center_shift_total, tol)) break if center_shift_total > 0: # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels, best_inertia = \ _labels_inertia(X, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances) return best_labels, best_inertia, best_centers, i + 1 def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances): """Compute labels and inertia using a full distance matrix. This will overwrite the 'distances' array in-place. Parameters ---------- X : numpy array, shape (n_sample, n_features) Input data. x_squared_norms : numpy array, shape (n_samples,) Precomputed squared norms of X. centers : numpy array, shape (n_clusters, n_features) Cluster centers which data is assigned to. distances : numpy array, shape (n_samples,) Pre-allocated array in which distances are stored. Returns ------- labels : numpy array, dtype=np.int, shape (n_samples,) Indices of clusters that samples are assigned to. inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] k = centers.shape[0] all_distances = euclidean_distances(centers, X, x_squared_norms, squared=True) labels = np.empty(n_samples, dtype=np.int32) labels.fill(-1) mindist = np.empty(n_samples) mindist.fill(np.infty) for center_id in range(k): dist = all_distances[center_id] labels[dist < mindist] = center_id mindist = np.minimum(dist, mindist) if n_samples == distances.shape[0]: # distances will be changed in-place distances[:] = mindist inertia = mindist.sum() return labels, inertia def _labels_inertia(X, x_squared_norms, centers, precompute_distances=True, distances=None): """E step of the K-means EM algorithm. Compute the labels and the inertia of the given samples and centers. This will compute the distances in-place. Parameters ---------- X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features) The input samples to assign to the labels. x_squared_norms: array, shape (n_samples,) Precomputed squared euclidean norm of each data point, to speed up computations. centers: float array, shape (k, n_features) The cluster centers. 
precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). distances: float array, shape (n_samples,) Pre-allocated array to be filled in with each sample's distance to the closest center. Returns ------- labels: int array of shape(n) The resulting assignment inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] # set the default value of centers to -1 to be able to detect any anomaly # easily labels = -np.ones(n_samples, np.int32) if distances is None: distances = np.zeros(shape=(0,), dtype=X.dtype) # distances will be changed in-place if sp.issparse(X): inertia = _k_means._assign_labels_csr( X, x_squared_norms, centers, labels, distances=distances) else: if precompute_distances: return _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances) inertia = _k_means._assign_labels_array( X, x_squared_norms, centers, labels, distances=distances) return labels, inertia def _init_centroids(X, k, init, random_state=None, x_squared_norms=None, init_size=None): """Compute the initial centroids Parameters ---------- X: array, shape (n_samples, n_features) k: int number of centroids init: {'k-means++', 'random' or ndarray or callable} optional Method for initialization random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. x_squared_norms: array, shape (n_samples,), optional Squared euclidean norm of each data point. Pass it if you have it at hands already to avoid it being recomputed here. Default: None init_size : int, optional Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than k. Returns ------- centers: array, shape(k, n_features) """ random_state = check_random_state(random_state) n_samples = X.shape[0] if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) if init_size is not None and init_size < n_samples: if init_size < k: warnings.warn( "init_size=%d should be larger than k=%d. " "Setting it to 3*k" % (init_size, k), RuntimeWarning, stacklevel=2) init_size = 3 * k init_indices = random_state.randint(0, n_samples, init_size) X = X[init_indices] x_squared_norms = x_squared_norms[init_indices] n_samples = X.shape[0] elif n_samples < k: raise ValueError( "n_samples=%d should be larger than k=%d" % (n_samples, k)) if isinstance(init, string_types) and init == 'k-means++': centers = _k_init(X, k, random_state=random_state, x_squared_norms=x_squared_norms) elif isinstance(init, string_types) and init == 'random': seeds = random_state.permutation(n_samples)[:k] centers = X[seeds] elif hasattr(init, '__array__'): # ensure that the centers have the same dtype as X # this is a requirement of fused types of cython centers = np.array(init, dtype=X.dtype) elif callable(init): centers = init(X, k, random_state=random_state) centers = np.asarray(centers, dtype=X.dtype) else: raise ValueError("the init parameter for the k-means should " "be 'k-means++' or 'random' or an ndarray, " "'%s' (type '%s') was passed." % (init, type(init))) if sp.issparse(centers): centers = centers.toarray() _validate_center_shape(X, k, centers) return centers class KMeans(BaseEstimator, ClusterMixin, TransformerMixin): """K-Means clustering Read more in the :ref:`User Guide <k_means>`. 
Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random' or an ndarray} Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. algorithm : "auto", "full" or "elkan", default="auto" K-means algorithm to use. The classical EM-style algorithm is "full". The "elkan" variation is more efficient by using the triangle inequality, but currently doesn't support sparse data. "auto" chooses "elkan" for dense data and "full" for sparse data. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, default: 1e-4 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point inertia_ : float Sum of distances of samples to their closest cluster center. Examples -------- >>> from sklearn.cluster import KMeans >>> import numpy as np >>> X = np.array([[1, 2], [1, 4], [1, 0], ... [4, 2], [4, 4], [4, 0]]) >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X) >>> kmeans.labels_ array([0, 0, 0, 1, 1, 1], dtype=int32) >>> kmeans.predict([[0, 0], [4, 4]]) array([0, 1], dtype=int32) >>> kmeans.cluster_centers_ array([[ 1., 2.], [ 4., 2.]]) See also -------- MiniBatchKMeans Alternative online implementation that does incremental updates of the centers positions using mini-batches. For large scale learning (say n_samples > 10k) MiniBatchKMeans is probably much faster than the default batch implementation. Notes ------ The k-means problem is solved using Lloyd's algorithm. 
The average complexity is given by O(k n T), were n is the number of samples and T is the number of iteration. The worst case complexity is given by O(n^(k+2/p)) with n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii, 'How slow is the k-means method?' SoCG2006) In practice, the k-means algorithm is very fast (one of the fastest clustering algorithms available), but it falls in local minima. That's why it can be useful to restart it several times. """ def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=1e-4, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=1, algorithm='auto'): self.n_clusters = n_clusters self.init = init self.max_iter = max_iter self.tol = tol self.precompute_distances = precompute_distances self.n_init = n_init self.verbose = verbose self.random_state = random_state self.copy_x = copy_x self.n_jobs = n_jobs self.algorithm = algorithm def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32]) if X.shape[0] < self.n_clusters: raise ValueError("n_samples=%d should be >= n_clusters=%d" % ( X.shape[0], self.n_clusters)) return X def _check_test_data(self, X): X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES) n_samples, n_features = X.shape expected_n_features = self.cluster_centers_.shape[1] if not n_features == expected_n_features: raise ValueError("Incorrect number of features. " "Got %d features, expected %d" % ( n_features, expected_n_features)) return X def fit(self, X, y=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ random_state = check_random_state(self.random_state) X = self._check_fit_data(X) self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \ k_means( X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, precompute_distances=self.precompute_distances, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, algorithm=self.algorithm, return_n_iter=True) return self def fit_predict(self, X, y=None): """Compute cluster centers and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X).labels_ def fit_transform(self, X, y=None): """Compute clustering and transform X to cluster-distance space. Equivalent to fit(X).transform(X), but more efficiently implemented. """ # Currently, this just skips a copy of the data if it is not in # np.array or CSR format already. # XXX This skips _check_test_data, which may change the dtype; # we should refactor the input validation. X = self._check_fit_data(X) return self.fit(X)._transform(X) def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. 
""" check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._transform(X) def _transform(self, X): """guts of transform method; no input validation""" return euclidean_distances(X, self.cluster_centers_) def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0] def score(self, X, y=None): """Opposite of the value of X on the K-means objective. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data. Returns ------- score : float Opposite of the value of X on the K-means objective. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1] def _mini_batch_step(X, x_squared_norms, centers, counts, old_center_buffer, compute_squared_diff, distances, random_reassign=False, random_state=None, reassignment_ratio=.01, verbose=False): """Incremental update of the centers for the Minibatch K-Means algorithm. Parameters ---------- X : array, shape (n_samples, n_features) The original data array. x_squared_norms : array, shape (n_samples,) Squared euclidean norm of each data point. centers : array, shape (k, n_features) The cluster centers. This array is MODIFIED IN PLACE counts : array, shape (k,) The vector in which we keep track of the numbers of elements in a cluster. This array is MODIFIED IN PLACE distances : array, dtype float, shape (n_samples), optional If not None, should be a pre-allocated array that will be used to store the distances of each sample to its closest center. May not be None when random_reassign is True. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. random_reassign : boolean, optional If True, centers with very low counts are randomly reassigned to observations. reassignment_ratio : float, optional Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more likely to be reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : bool, optional, default False Controls the verbosity. compute_squared_diff : bool If set to False, the squared diff computation is skipped. old_center_buffer : int Copy of old centers for monitoring convergence. Returns ------- inertia : float Sum of distances of samples to their closest cluster center. squared_diff : numpy array, shape (n_clusters,) Squared distances between previous and updated cluster centers. 
""" # Perform label assignment to nearest centers nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers, distances=distances) if random_reassign and reassignment_ratio > 0: random_state = check_random_state(random_state) # Reassign clusters that have very low counts to_reassign = counts < reassignment_ratio * counts.max() # pick at most .5 * batch_size samples as new centers if to_reassign.sum() > .5 * X.shape[0]: indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):] to_reassign[indices_dont_reassign] = False n_reassigns = to_reassign.sum() if n_reassigns: # Pick new clusters amongst observations with uniform probability new_centers = choice(X.shape[0], replace=False, size=n_reassigns, random_state=random_state) if verbose: print("[MiniBatchKMeans] Reassigning %i cluster centers." % n_reassigns) if sp.issparse(X) and not sp.issparse(centers): assign_rows_csr(X, astype(new_centers, np.intp), astype(np.where(to_reassign)[0], np.intp), centers) else: centers[to_reassign] = X[new_centers] # reset counts of reassigned centers, but don't reset them too small # to avoid instant reassignment. This is a pretty dirty hack as it # also modifies the learning rates. counts[to_reassign] = np.min(counts[~to_reassign]) # implementation for the sparse CSR representation completely written in # cython if sp.issparse(X): return inertia, _k_means._mini_batch_update_csr( X, x_squared_norms, centers, counts, nearest_center, old_center_buffer, compute_squared_diff) # dense variant in mostly numpy (not as memory efficient though) k = centers.shape[0] squared_diff = 0.0 for center_idx in range(k): # find points from minibatch that are assigned to this center center_mask = nearest_center == center_idx count = center_mask.sum() if count > 0: if compute_squared_diff: old_center_buffer[:] = centers[center_idx] # inplace remove previous count scaling centers[center_idx] *= counts[center_idx] # inplace sum with new points members of this cluster centers[center_idx] += np.sum(X[center_mask], axis=0) # update the count statistics for this center counts[center_idx] += count # inplace rescale to compute mean of all points (old and new) # Note: numpy >= 1.10 does not support '/=' for the following # expression for a mixture of int and float (see numpy issue #6464) centers[center_idx] = centers[center_idx] / counts[center_idx] # update the squared diff if necessary if compute_squared_diff: diff = centers[center_idx].ravel() - old_center_buffer.ravel() squared_diff += np.dot(diff, diff) return inertia, squared_diff def _mini_batch_convergence(model, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, context, verbose=0): """Helper function to encapsulate the early stopping logic""" # Normalize inertia to be able to compare values when # batch_size changes batch_inertia /= model.batch_size centers_squared_diff /= model.batch_size # Compute an Exponentially Weighted Average of the squared # diff to monitor the convergence while discarding # minibatch-local stochastic variability: # https://en.wikipedia.org/wiki/Moving_average ewa_diff = context.get('ewa_diff') ewa_inertia = context.get('ewa_inertia') if ewa_diff is None: ewa_diff = centers_squared_diff ewa_inertia = batch_inertia else: alpha = float(model.batch_size) * 2.0 / (n_samples + 1) alpha = 1.0 if alpha > 1.0 else alpha ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha # Log progress to be able to monitor convergence if verbose: progress_msg 
= ( 'Minibatch iteration %d/%d:' ' mean batch inertia: %f, ewa inertia: %f ' % ( iteration_idx + 1, n_iter, batch_inertia, ewa_inertia)) print(progress_msg) # Early stopping based on absolute tolerance on squared change of # centers position (using EWA smoothing) if tol > 0.0 and ewa_diff <= tol: if verbose: print('Converged (small centers change) at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # Early stopping heuristic due to lack of improvement on smoothed inertia ewa_inertia_min = context.get('ewa_inertia_min') no_improvement = context.get('no_improvement', 0) if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min: no_improvement = 0 ewa_inertia_min = ewa_inertia else: no_improvement += 1 if (model.max_no_improvement is not None and no_improvement >= model.max_no_improvement): if verbose: print('Converged (lack of improvement in inertia)' ' at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # update the convergence context to maintain state across successive calls: context['ewa_diff'] = ewa_diff context['ewa_inertia'] = ewa_inertia context['ewa_inertia_min'] = ewa_inertia_min context['no_improvement'] = no_improvement return False class MiniBatchKMeans(KMeans): """Mini-Batch K-Means clustering Read more in the :ref:`User Guide <mini_batch_kmeans>`. Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics. max_no_improvement : int, default: 10 Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed inertia. To disable convergence detection based on inertia, set max_no_improvement to None. tol : float, default: 0.0 Control early stopping based on the relative center changes as measured by a smoothed, variance-normalized of the mean center squared position changes. This early stopping heuristics is closer to the one used for the batch variant of the algorithms but induces a slight computational and memory overhead over the inertia heuristic. To disable convergence detection based on normalized center change, set tol to 0.0 (default). batch_size : int, optional, default: 100 Size of the mini batches. init_size : int, optional, default: 3 * batch_size Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than n_clusters. init : {'k-means++', 'random' or an ndarray}, default: 'k-means++' Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. n_init : int, default=3 Number of random initializations that are tried. In contrast to KMeans, the algorithm is only run once, using the best of the ``n_init`` initializations as measured by inertia. compute_labels : boolean, default=True Compute label assignment and inertia for the complete dataset once the minibatch optimization has converged in fit. 
random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. reassignment_ratio : float, default: 0.01 Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more easily reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : boolean, optional Verbosity mode. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point (if compute_labels is set to True). inertia_ : float The value of the inertia criterion associated with the chosen partition (if compute_labels is set to True). The inertia is defined as the sum of square distances of samples to their nearest neighbor. See also -------- KMeans The classic implementation of the clustering method based on the Lloyd's algorithm. It consumes the whole set of input data at each iteration. Notes ----- See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf """ def __init__(self, n_clusters=8, init='k-means++', max_iter=100, batch_size=100, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init=3, reassignment_ratio=0.01): super(MiniBatchKMeans, self).__init__( n_clusters=n_clusters, init=init, max_iter=max_iter, verbose=verbose, random_state=random_state, tol=tol, n_init=n_init) self.max_no_improvement = max_no_improvement self.batch_size = batch_size self.compute_labels = compute_labels self.init_size = init_size self.reassignment_ratio = reassignment_ratio def fit(self, X, y=None): """Compute the centroids on X by chunking it into mini-batches. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster """ random_state = check_random_state(self.random_state) X = check_array(X, accept_sparse="csr", order='C', dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if n_samples < self.n_clusters: raise ValueError("Number of samples smaller than number " "of clusters.") n_init = self.n_init if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=X.dtype) if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in MiniBatchKMeans instead of ' 'n_init=%d' % self.n_init, RuntimeWarning, stacklevel=2) n_init = 1 x_squared_norms = row_norms(X, squared=True) if self.tol > 0.0: tol = _tolerance(X, self.tol) # using tol-based early stopping needs the allocation of a # dedicated before which can be expensive for high dim data: # hence we allocate it outside of the main loop old_center_buffer = np.zeros(n_features, dtype=X.dtype) else: tol = 0.0 # no need for the center buffer if tol-based early stopping is # disabled old_center_buffer = np.zeros(0, dtype=X.dtype) distances = np.zeros(self.batch_size, dtype=X.dtype) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) n_iter = int(self.max_iter * n_batches) init_size = self.init_size if init_size is None: init_size = 3 * self.batch_size if init_size > n_samples: init_size = n_samples self.init_size_ = init_size validation_indices = random_state.randint(0, n_samples, init_size) X_valid = X[validation_indices] x_squared_norms_valid = x_squared_norms[validation_indices] # perform several inits with random sub-sets best_inertia = None for init_idx in range(n_init): if self.verbose: print("Init %d/%d with method: %s" % (init_idx + 1, n_init, self.init)) counts = np.zeros(self.n_clusters, dtype=np.int32) # TODO: once the `k_means` function works with sparse input we # should refactor the following init to use it instead. 
# Initialize the centers using only a fraction of the data as we # expect n_samples to be very large when using MiniBatchKMeans cluster_centers = _init_centroids( X, self.n_clusters, self.init, random_state=random_state, x_squared_norms=x_squared_norms, init_size=init_size) # Compute the label assignment on the init dataset batch_inertia, centers_squared_diff = _mini_batch_step( X_valid, x_squared_norms[validation_indices], cluster_centers, counts, old_center_buffer, False, distances=None, verbose=self.verbose) # Keep only the best cluster centers across independent inits on # the common validation set _, inertia = _labels_inertia(X_valid, x_squared_norms_valid, cluster_centers) if self.verbose: print("Inertia for init %d/%d: %f" % (init_idx + 1, n_init, inertia)) if best_inertia is None or inertia < best_inertia: self.cluster_centers_ = cluster_centers self.counts_ = counts best_inertia = inertia # Empty context to be used inplace by the convergence check routine convergence_context = {} # Perform the iterative optimization until the final convergence # criterion for iteration_idx in range(n_iter): # Sample a minibatch from the full dataset minibatch_indices = random_state.randint( 0, n_samples, self.batch_size) # Perform the actual update step on the minibatch data batch_inertia, centers_squared_diff = _mini_batch_step( X[minibatch_indices], x_squared_norms[minibatch_indices], self.cluster_centers_, self.counts_, old_center_buffer, tol > 0.0, distances=distances, # Here we randomly choose whether to perform # random reassignment: the choice is done as a function # of the iteration index, and the minimum number of # counts, in order to force this reassignment to happen # every once in a while random_reassign=((iteration_idx + 1) % (10 + self.counts_.min()) == 0), random_state=random_state, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) # Monitor convergence and do early stopping if necessary if _mini_batch_convergence( self, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, convergence_context, verbose=self.verbose): break self.n_iter_ = iteration_idx + 1 if self.compute_labels: self.labels_, self.inertia_ = self._labels_inertia_minibatch(X) return self def _labels_inertia_minibatch(self, X): """Compute labels and inertia using mini batches. This is slightly slower than doing everything at once but preventes memory errors / segfaults. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- labels : array, shap (n_samples,) Cluster labels for each point. inertia : float Sum of squared distances of points to nearest cluster. """ if self.verbose: print('Computing label assignment and total inertia') x_squared_norms = row_norms(X, squared=True) slices = gen_batches(X.shape[0], self.batch_size) results = [_labels_inertia(X[s], x_squared_norms[s], self.cluster_centers_) for s in slices] labels, inertia = zip(*results) return np.hstack(labels), np.sum(inertia) def partial_fit(self, X, y=None): """Update k means estimate on a single mini-batch X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster. 
""" X = check_array(X, accept_sparse="csr") n_samples, n_features = X.shape if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=X.dtype) if n_samples == 0: return self x_squared_norms = row_norms(X, squared=True) self.random_state_ = getattr(self, "random_state_", check_random_state(self.random_state)) if (not hasattr(self, 'counts_') or not hasattr(self, 'cluster_centers_')): # this is the first call partial_fit on this object: # initialize the cluster centers self.cluster_centers_ = _init_centroids( X, self.n_clusters, self.init, random_state=self.random_state_, x_squared_norms=x_squared_norms, init_size=self.init_size) self.counts_ = np.zeros(self.n_clusters, dtype=np.int32) random_reassign = False distances = None else: # The lower the minimum count is, the more we do random # reassignment, however, we don't want to do random # reassignment too often, to allow for building up counts random_reassign = self.random_state_.randint( 10 * (1 + self.counts_.min())) == 0 distances = np.zeros(X.shape[0], dtype=X.dtype) _mini_batch_step(X, x_squared_norms, self.cluster_centers_, self.counts_, np.zeros(0, dtype=X.dtype), 0, random_reassign=random_reassign, distances=distances, random_state=self.random_state_, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia( X, x_squared_norms, self.cluster_centers_) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._labels_inertia_minibatch(X)[0]
bsd-3-clause
aashish24/seaborn
seaborn/rcmod.py
4
14969
"""Functions that alter the matplotlib rc dictionary on the fly.""" import numpy as np import matplotlib as mpl from . import palettes _style_keys = ( "axes.facecolor", "axes.edgecolor", "axes.grid", "axes.axisbelow", "axes.linewidth", "axes.labelcolor", "grid.color", "grid.linestyle", "text.color", "xtick.color", "ytick.color", "xtick.direction", "ytick.direction", "xtick.major.size", "ytick.major.size", "xtick.minor.size", "ytick.minor.size", "legend.frameon", "legend.numpoints", "legend.scatterpoints", "lines.solid_capstyle", "image.cmap", "font.family", "font.sans-serif", ) _context_keys = ( "figure.figsize", "axes.labelsize", "axes.titlesize", "xtick.labelsize", "ytick.labelsize", "legend.fontsize", "grid.linewidth", "lines.linewidth", "patch.linewidth", "lines.markersize", "lines.markeredgewidth", "xtick.major.width", "ytick.major.width", "xtick.minor.width", "ytick.minor.width", "xtick.major.pad", "ytick.major.pad" ) def set(context="notebook", style="darkgrid", palette="deep", font="sans-serif", font_scale=1, rc=None): """Set aesthetic parameters in one step. Each set of parameters can be set directly or temporarily, see the referenced functions below for more information. Parameters ---------- context : string or dict Plotting context parameters, see :func:`plotting_context` style : string or dict Axes style parameters, see :func:`axes_style` palette : string or sequence Color palette, see :func:`color_palette` font : string Font family, see matplotlib font manager. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict or None Dictionary of rc parameter mappings to override the above. """ set_context(context, font_scale) set_style(style, rc={"font.family": font}) set_palette(palette) if rc is not None: mpl.rcParams.update(rc) def reset_defaults(): """Restore all RC params to default settings.""" mpl.rcParams.update(mpl.rcParamsDefault) def reset_orig(): """Restore all RC params to original settings (respects custom rc).""" mpl.rcParams.update(mpl.rcParamsOrig) class _AxesStyle(dict): """Light wrapper on a dict to set style temporarily.""" def __enter__(self): """Open the context.""" rc = mpl.rcParams self._orig_style = {k: rc[k] for k in _style_keys} set_style(self) return self def __exit__(self, *args): """Close the context.""" set_style(self._orig_style) class _PlottingContext(dict): """Light wrapper on a dict to set context temporarily.""" def __enter__(self): """Open the context.""" rc = mpl.rcParams self._orig_context = {k: rc[k] for k in _context_keys} set_context(self) return self def __exit__(self, *args): """Close the context.""" set_context(self._orig_context) def axes_style(style=None, rc=None): """Return a parameter dict for the aesthetic style of the plots. This affects things like the color of the axes, whether a grid is enabled by default, and other aesthetic elements. This function returns an object that can be used in a ``with`` statement to temporarily change the style parameters. Parameters ---------- style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured set. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. 
Examples -------- >>> st = axes_style("whitegrid") >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8}) >>> import matplotlib.pyplot as plt >>> with axes_style("white"): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_style : set the matplotlib parameters for a seaborn theme plotting_context : return a parameter dict to to scale plot elements color_palette : define the color palette for a plot """ if style is None: style_dict = {k: mpl.rcParams[k] for k in _style_keys} elif isinstance(style, dict): style_dict = style else: styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"] if style not in styles: raise ValueError("style must be one of %s" % ", ".join(styles)) # Define colors here dark_gray = ".15" light_gray = ".8" # Common parameters style_dict = { "text.color": dark_gray, "axes.labelcolor": dark_gray, "legend.frameon": False, "legend.numpoints": 1, "legend.scatterpoints": 1, "xtick.direction": "out", "ytick.direction": "out", "xtick.color": dark_gray, "ytick.color": dark_gray, "axes.axisbelow": True, "image.cmap": "Greys", "font.family": ["sans-serif"], "font.sans-serif": ["Arial", "Liberation Sans", "Bitstream Vera Sans", "sans-serif"], "grid.linestyle": "-", "lines.solid_capstyle": "round", } # Set grid on or off if "grid" in style: style_dict.update({ "axes.grid": True, }) else: style_dict.update({ "axes.grid": False, }) # Set the color of the background, spines, and grids if style.startswith("dark"): style_dict.update({ "axes.facecolor": "#EAEAF2", "axes.edgecolor": "white", "axes.linewidth": 0, "grid.color": "white", }) elif style == "whitegrid": style_dict.update({ "axes.facecolor": "white", "axes.edgecolor": light_gray, "axes.linewidth": 1, "grid.color": light_gray, }) elif style in ["white", "ticks"]: style_dict.update({ "axes.facecolor": "white", "axes.edgecolor": dark_gray, "axes.linewidth": 1.25, "grid.color": light_gray, }) # Show or hide the axes ticks if style == "ticks": style_dict.update({ "xtick.major.size": 6, "ytick.major.size": 6, "xtick.minor.size": 3, "ytick.minor.size": 3, }) else: style_dict.update({ "xtick.major.size": 0, "ytick.major.size": 0, "xtick.minor.size": 0, "ytick.minor.size": 0, }) # Override these settings with the provided rc dictionary if rc is not None: rc = {k: v for k, v in rc.items() if k in _style_keys} style_dict.update(rc) # Wrap in an _AxesStyle object so this can be used in a with statement style_object = _AxesStyle(style_dict) return style_object def set_style(style=None, rc=None): """Set the aesthetic style of the plots. This affects things like the color of the axes, whether a grid is enabled by default, and other aesthetic elements. Parameters ---------- style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured set. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- >>> set_style("whitegrid") >>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8}) See Also -------- axes_style : return a dict of parameters or use in a ``with`` statement to temporarily set the style. 
set_context : set parameters to scale plot elements set_palette : set the default color palette for figures """ style_object = axes_style(style, rc) mpl.rcParams.update(style_object) def plotting_context(context=None, font_scale=1, rc=None): """Return a parameter dict to scale elements of the figure. This affects things like the size of the labels, lines, and other elements of the plot, but not the overall style. The base context is "notebook", and the other contexts are "paper", "talk", and "poster", which are version of the notebook parameters scaled by .8, 1.3, and 1.6, respectively. This function returns an object that can be used in a ``with`` statement to temporarily change the context parameters. Parameters ---------- context : dict, None, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- >>> c = plotting_context("poster") >>> c = plotting_context("notebook", font_scale=1.5) >>> c = plotting_context("talk", rc={"lines.linewidth": 2}) >>> import matplotlib.pyplot as plt >>> with plotting_context("paper"): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_context : set the matplotlib parameters to scale plot elements axes_style : return a dict of parameters defining a figure style color_palette : define the color palette for a plot """ if context is None: context_dict = {k: mpl.rcParams[k] for k in _context_keys} elif isinstance(context, dict): context_dict = context else: contexts = ["paper", "notebook", "talk", "poster"] if context not in contexts: raise ValueError("context must be in %s" % ", ".join(contexts)) # Set up dictionary of default parameters base_context = { "figure.figsize": np.array([8, 5.5]), "axes.labelsize": 11, "axes.titlesize": 12, "xtick.labelsize": 10, "ytick.labelsize": 10, "legend.fontsize": 10, "grid.linewidth": 1, "lines.linewidth": 1.75, "patch.linewidth": .3, "lines.markersize": 7, "lines.markeredgewidth": 0, "xtick.major.width": 1, "ytick.major.width": 1, "xtick.minor.width": .5, "ytick.minor.width": .5, "xtick.major.pad": 7, "ytick.major.pad": 7, } # Scale all the parameters by the same factor depending on the context scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context] context_dict = {k: v * scaling for k, v in base_context.items()} # Now independently scale the fonts font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize", "xtick.labelsize", "ytick.labelsize"] font_dict = {k: context_dict[k] * font_scale for k in font_keys} context_dict.update(font_dict) # Implement hack workaround for matplotlib bug # See https://github.com/mwaskom/seaborn/issues/344 # There is a bug in matplotlib 1.4.2 that makes points invisible when # they don't have an edgewidth. It will supposedly be fixed in 1.4.3. 
if mpl.__version__ == "1.4.2": context_dict["lines.markeredgewidth"] = 0.01 # Override these settings with the provided rc dictionary if rc is not None: rc = {k: v for k, v in rc.items() if k in _context_keys} context_dict.update(rc) # Wrap in a _PlottingContext object so this can be used in a with statement context_object = _PlottingContext(context_dict) return context_object def set_context(context=None, font_scale=1, rc=None): """Set the plotting context parameters. This affects things like the size of the labels, lines, and other elements of the plot, but not the overall style. The base context is "notebook", and the other contexts are "paper", "talk", and "poster", which are version of the notebook parameters scaled by .8, 1.3, and 1.6, respectively. Parameters ---------- context : dict, None, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- >>> set_context("paper") >>> set_context("talk", font_scale=1.4) >>> set_context("talk", rc={"lines.linewidth": 2}) See Also -------- plotting_context : return a dictionary of rc parameters, or use in a ``with`` statement to temporarily set the context. set_style : set the default parameters for figure style set_palette : set the default color palette for figures """ context_object = plotting_context(context, font_scale, rc) mpl.rcParams.update(context_object) def set_palette(name, n_colors=6, desat=None): """Set the matplotlib color cycle using a seaborn palette. Parameters ---------- name : hls | husl | matplotlib colormap | seaborn color palette Palette definition. Should be something that :func:`color_palette` can process. n_colors : int Number of colors in the cycle. desat : float Factor to desaturate each color by. Examples -------- >>> set_palette("Reds") >>> set_palette("Set1", 8, .75) See Also -------- color_palette : build a color palette or set the color cycle temporarily in a ``with`` statement. set_context : set parameters to scale plot elements set_style : set the default parameters for figure style """ colors = palettes.color_palette(name, n_colors, desat) mpl.rcParams["axes.color_cycle"] = list(colors) mpl.rcParams["patch.facecolor"] = colors[0]
bsd-3-clause
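A minimal usage sketch of the style and context helpers defined in the module above, assuming they are re-exported at the top level of the seaborn package (as in released versions); the plotted data is purely illustrative:

import matplotlib.pyplot as plt
import seaborn as sns  # assumes the module above is exposed through the seaborn namespace

sns.set(context="talk", style="whitegrid", font_scale=1.2)

# axes_style() and plotting_context() return dict-like context managers,
# so a style/context can be applied to a single figure only.
with sns.axes_style("white"), sns.plotting_context("paper"):
    f, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])

sns.reset_orig()  # undo the seaborn changes and return to the original rc params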
TomAugspurger/pandas
asv_bench/benchmarks/io/excel.py
3
1891
from io import BytesIO

import numpy as np
from odf.opendocument import OpenDocumentSpreadsheet
from odf.table import Table, TableCell, TableRow
from odf.text import P

from pandas import DataFrame, ExcelWriter, date_range, read_excel

from ..pandas_vb_common import tm


def _generate_dataframe():
    N = 2000
    C = 5
    df = DataFrame(
        np.random.randn(N, C),
        columns=[f"float{i}" for i in range(C)],
        index=date_range("20000101", periods=N, freq="H"),
    )
    df["object"] = tm.makeStringIndex(N)
    return df


class WriteExcel:

    params = ["openpyxl", "xlsxwriter", "xlwt"]
    param_names = ["engine"]

    def setup(self, engine):
        self.df = _generate_dataframe()

    def time_write_excel(self, engine):
        bio = BytesIO()
        bio.seek(0)
        writer = ExcelWriter(bio, engine=engine)
        self.df.to_excel(writer, sheet_name="Sheet1")
        writer.save()


class ReadExcel:

    params = ["xlrd", "openpyxl", "odf"]
    param_names = ["engine"]
    fname_excel = "spreadsheet.xlsx"
    fname_odf = "spreadsheet.ods"

    def _create_odf(self):
        doc = OpenDocumentSpreadsheet()
        table = Table(name="Table1")
        for row in self.df.values:
            tr = TableRow()
            for val in row:
                tc = TableCell(valuetype="string")
                tc.addElement(P(text=val))
                tr.addElement(tc)
            table.addElement(tr)

        doc.spreadsheet.addElement(table)
        doc.save(self.fname_odf)

    def setup_cache(self):
        self.df = _generate_dataframe()

        self.df.to_excel(self.fname_excel, sheet_name="Sheet1")
        self._create_odf()

    def time_read_excel(self, engine):
        fname = self.fname_odf if engine == "odf" else self.fname_excel
        read_excel(fname, engine=engine)


from ..pandas_vb_common import setup  # noqa: F401 isort:skip
bsd-3-clause
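These asv benchmark classes can also be driven by hand for a quick check; a rough sketch, assuming the module above is importable and openpyxl is installed (asv itself normally handles setup and timing):

import time

bench = WriteExcel()
bench.setup("openpyxl")           # engine value taken from the class's `params`

t0 = time.time()
bench.time_write_excel("openpyxl")
print("to_excel via openpyxl: %.3fs" % (time.time() - t0))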
xyguo/scikit-learn
sklearn/tree/tests/test_tree.py
32
52369
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.validation import check_random_state from sklearn.exceptions import NotFittedError from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.utils import compute_sample_weight CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, presort=True), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, presort=True), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 
2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. 
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = assert_warns( DeprecationWarning, clf.transform, X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. 
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_split(): """Test min_samples_split parameter""" X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test for integer parameter est = TreeEstimator(min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) # test for float parameter est = TreeEstimator(min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test integer parameter est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) # test float parameter est = TreeEstimator(min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] 
assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): for name, TreeEstimator in ALL_TREES.items(): if "Classifier" in name: X, y = iris.data, iris.target else: X, y = boston.data, boston.target est = TreeEstimator(random_state=0) est.fit(X, y) score = est.score(X, y) fitted_attribute = dict() for attribute in ["max_depth", "node_count", "capacity"]: fitted_attribute[attribute] = getattr(est.tree_, attribute) serialized_object = pickle.dumps(est) est2 = pickle.loads(serialized_object) assert_equal(type(est2), est.__class__) score2 = est2.score(X, y) assert_equal(score, score2, "Failed to generate same score after pickling " "with {0}".format(name)) for attribute in fitted_attribute: assert_equal(getattr(est2.tree_, attribute), fitted_attribute[attribute], "Failed to generate same attribute {0} after " "pickling with {1}".format(attribute, name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. 
unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = compute_sample_weight("balanced", unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if not est.presort: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. # Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. 
X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in CLF_TREES: yield check_class_weights, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in CLF_TREES: yield check_class_weight_errors, name def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leafs. 
from sklearn.tree._tree import TREE_LEAF X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) tree = est.tree_ assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1) # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test precedence of max_leaf_nodes over max_depth. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-3 <= value.flat[0] < 3, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), d.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray()) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, 
sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if not TreeEstimator().presort: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name) def check_presort_sparse(est, X, y): assert_raises(ValueError, est.fit, X, y) def test_presort_sparse(): ests = (DecisionTreeClassifier(presort=True), DecisionTreeRegressor(presort=True)) sparse_matrices = (csr_matrix, csc_matrix, coo_matrix) y, X = datasets.make_multilabel_classification(random_state=0, n_samples=50, n_features=1, n_classes=20) y = y[:, 0] for est, sparse_matrix in product(ests, sparse_matrices): yield check_presort_sparse, est, sparse_matrix(X), y def test_decision_path_hardcoded(): X = iris.data y = iris.target est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) node_indicator = est.decision_path(X[:2]).toarray() assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) def check_decision_path(name): X = iris.data y = iris.target n_samples = X.shape[0] TreeEstimator = ALL_TREES[name] est = TreeEstimator(random_state=0, max_depth=2) est.fit(X, y) node_indicator_csr = est.decision_path(X) node_indicator = node_indicator_csr.toarray() assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count)) # Assert that leaves index are correct leaves = est.apply(X) leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) # Ensure only one leave node per sample all_leaves = est.tree_.children_left == TREE_LEAF assert_array_almost_equal(np.dot(node_indicator, all_leaves), np.ones(shape=n_samples)) # Ensure max depth is consistent with sum of indicator max_depth = node_indicator.sum(axis=1).max() assert_less_equal(est.tree_.max_depth, max_depth) def test_decision_path(): for name in ALL_TREES: yield (check_decision_path, name) def check_no_sparse_y_support(name): X, y = X_multilabel, csr_matrix(y_multilabel) TreeEstimator = ALL_TREES[name] assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y) def test_no_sparse_y_support(): # Currently we don't support sparse y for name in ALL_TREES: yield (check_no_sparse_y_support, name)
bsd-3-clause
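The generator-style tests above yield (check, *args) pairs for the test runner; a sketch of calling a few of the underlying checks directly, with estimator and dataset names taken from the ALL_TREES and DATASETS dictionaries defined in the file:

# Run individual checks outside of nose, e.g. from an interactive session
# in which the test module's namespace has been imported.
check_min_weight_fraction_leaf("DecisionTreeClassifier", "iris")
check_sparse_input("ExtraTreeClassifier", "sparse-pos", max_depth=3)
check_decision_path("DecisionTreeRegressor")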
planetarymike/IDL-Colorbars
IDL_py_test/039_Rainbow_plus_white.py
1
5771
from matplotlib.colors import LinearSegmentedColormap from numpy import nan, inf cm_data = [[0., 0., 0.], [0.0156863, 0., 0.0117647], [0.0352941, 0., 0.027451], [0.0509804, 0., 0.0392157], [0.0705882, 0., 0.054902], [0.0862745, 0., 0.0745098], [0.105882, 0., 0.0901961], [0.121569, 0., 0.109804], [0.141176, 0., 0.12549], [0.156863, 0., 0.14902], [0.176471, 0., 0.168627], [0.196078, 0., 0.188235], [0.227451, 0., 0.231373], [0.239216, 0., 0.247059], [0.25098, 0., 0.266667], [0.266667, 0., 0.282353], [0.270588, 0., 0.301961], [0.282353, 0., 0.317647], [0.290196, 0., 0.337255], [0.301961, 0., 0.356863], [0.309804, 0., 0.372549], [0.313725, 0., 0.392157], [0.321569, 0., 0.407843], [0.32549, 0., 0.427451], [0.329412, 0., 0.462745], [0.337255, 0., 0.478431], [0.341176, 0., 0.498039], [0.345098, 0., 0.517647], [0.337255, 0., 0.533333], [0.341176, 0., 0.552941], [0.341176, 0., 0.568627], [0.341176, 0., 0.588235], [0.333333, 0., 0.603922], [0.329412, 0., 0.623529], [0.329412, 0., 0.639216], [0.329412, 0., 0.658824], [0.309804, 0., 0.694118], [0.305882, 0., 0.713725], [0.301961, 0., 0.729412], [0.298039, 0., 0.74902], [0.278431, 0., 0.764706], [0.27451, 0., 0.784314], [0.266667, 0., 0.8], [0.258824, 0., 0.819608], [0.235294, 0., 0.839216], [0.227451, 0., 0.854902], [0.215686, 0., 0.87451], [0.180392, 0., 0.909804], [0.168627, 0., 0.92549], [0.156863, 0., 0.945098], [0.141176, 0., 0.960784], [0.129412, 0., 0.980392], [0.0980392, 0., 1.], [0.0823529, 0., 1.], [0.0627451, 0., 1.], [0.0470588, 0., 1.], [0.0156863, 0., 1.], [0., 0., 1.], [0., 0.0156863, 1.], [0., 0.0627451, 1.], [0., 0.0823529, 1.], [0., 0.0980392, 1.], [0., 0.113725, 1.], [0., 0.14902, 1.], [0., 0.164706, 1.], [0., 0.180392, 1.], [0., 0.2, 1.], [0., 0.215686, 1.], [0., 0.247059, 1.], [0., 0.262745, 1.], [0., 0.282353, 1.], [0., 0.329412, 1.], [0., 0.34902, 1.], [0., 0.364706, 1.], [0., 0.380392, 1.], [0., 0.415686, 1.], [0., 0.431373, 1.], [0., 0.447059, 1.], [0., 0.466667, 1.], [0., 0.498039, 1.], [0., 0.513725, 1.], [0., 0.529412, 1.], [0., 0.54902, 1.], [0., 0.596078, 1.], [0., 0.615686, 1.], [0., 0.631373, 1.], [0., 0.647059, 1.], [0., 0.682353, 1.], [0., 0.698039, 1.], [0., 0.713725, 1.], [0., 0.733333, 1.], [0., 0.764706, 1.], [0., 0.780392, 1.], [0., 0.796078, 1.], [0., 0.847059, 1.], [0., 0.862745, 1.], [0., 0.882353, 1.], [0., 0.898039, 1.], [0., 0.913725, 1.], [0., 0.94902, 1.], [0., 0.964706, 1.], [0., 0.980392, 1.], [0., 1., 1.], [0., 1., 0.964706], [0., 1., 0.94902], [0., 1., 0.933333], [0., 1., 0.882353], [0., 1., 0.862745], [0., 1., 0.847059], [0., 1., 0.831373], [0., 1., 0.796078], [0., 1., 0.780392], [0., 1., 0.764706], [0., 1., 0.74902], [0., 1., 0.733333], [0., 1., 0.698039], [0., 1., 0.682353], [0., 1., 0.666667], [0., 1., 0.615686], [0., 1., 0.596078], [0., 1., 0.580392], [0., 1., 0.564706], [0., 1., 0.529412], [0., 1., 0.513725], [0., 1., 0.498039], [0., 1., 0.482353], [0., 1., 0.447059], [0., 1., 0.431373], [0., 1., 0.415686], [0., 1., 0.4], [0., 1., 0.34902], [0., 1., 0.329412], [0., 1., 0.313725], [0., 1., 0.298039], [0., 1., 0.262745], [0., 1., 0.247059], [0., 1., 0.231373], [0., 1., 0.215686], [0., 1., 0.180392], [0., 1., 0.164706], [0., 1., 0.14902], [0., 1., 0.0980392], [0., 1., 0.0823529], [0., 1., 0.0627451], [0., 1., 0.0470588], [0., 1., 0.0313725], [0., 1., 0.], [0.0156863, 1., 0.], [0.0313725, 1., 0.], [0.0470588, 1., 0.], [0.0823529, 1., 0.], [0.0980392, 1., 0.], [0.113725, 1., 0.], [0.164706, 1., 0.], [0.180392, 1., 0.], [0.2, 1., 0.], [0.215686, 1., 0.], [0.247059, 1., 0.], [0.262745, 1., 0.], 
[0.282353, 1., 0.], [0.298039, 1., 0.], [0.313725, 1., 0.], [0.34902, 1., 0.], [0.364706, 1., 0.], [0.380392, 1., 0.], [0.431373, 1., 0.], [0.447059, 1., 0.], [0.466667, 1., 0.], [0.482353, 1., 0.], [0.513725, 1., 0.], [0.529412, 1., 0.], [0.54902, 1., 0.], [0.564706, 1., 0.], [0.6, 1., 0.], [0.615686, 1., 0.], [0.631373, 1., 0.], [0.647059, 1., 0.], [0.698039, 1., 0.], [0.713725, 1., 0.], [0.733333, 1., 0.], [0.74902, 1., 0.], [0.780392, 1., 0.], [0.796078, 1., 0.], [0.815686, 1., 0.], [0.831373, 1., 0.], [0.866667, 1., 0.], [0.882353, 1., 0.], [0.898039, 1., 0.], [0.94902, 1., 0.], [0.964706, 1., 0.], [0.980392, 1., 0.], [1., 1., 0.], [1., 0.980392, 0.], [1., 0.94902, 0.], [1., 0.933333, 0.], [1., 0.913725, 0.], [1., 0.898039, 0.], [1., 0.866667, 0.], [1., 0.847059, 0.], [1., 0.831373, 0.], [1., 0.780392, 0.], [1., 0.764706, 0.], [1., 0.74902, 0.], [1., 0.733333, 0.], [1., 0.698039, 0.], [1., 0.682353, 0.], [1., 0.666667, 0.], [1., 0.647059, 0.], [1., 0.631373, 0.], [1., 0.6, 0.], [1., 0.580392, 0.], [1., 0.564706, 0.], [1., 0.513725, 0.], [1., 0.498039, 0.], [1., 0.482353, 0.], [1., 0.466667, 0.], [1., 0.431373, 0.], [1., 0.415686, 0.], [1., 0.4, 0.], [1., 0.380392, 0.], [1., 0.34902, 0.], [1., 0.333333, 0.], [1., 0.313725, 0.], [1., 0.298039, 0.], [1., 0.247059, 0.], [1., 0.231373, 0.], [1., 0.215686, 0.], [1., 0.2, 0.], [1., 0.164706, 0.], [1., 0.14902, 0.], [1., 0.133333, 0.], [1., 0.113725, 0.], [1., 0.0823529, 0.], [1., 0.0666667, 0.], [1., 0.0470588, 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 0., 0.], [1., 1., 1.]] test_cm = LinearSegmentedColormap.from_list(__file__, cm_data) if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np try: from pycam02ucs.cm.viscm import viscm viscm(test_cm) except ImportError: print("pycam02ucs not found, falling back on simple display") plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm) plt.show()
gpl-2.0
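The LinearSegmentedColormap built above can be passed straight to any matplotlib artist that accepts a cmap; a small sketch with illustrative random data:

import numpy as np
import matplotlib.pyplot as plt

data = np.random.rand(20, 20)
plt.pcolormesh(data, cmap=test_cm)  # test_cm as constructed from cm_data above
plt.colorbar()
plt.show()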
cowlicks/odo
odo/backends/tests/test_sas.py
9
2041
from __future__ import absolute_import, division, print_function

import pytest

sas7bdat = pytest.importorskip('sas7bdat')
pytest.importorskip('odo.backends.sas')

import os
import pandas as pd
from collections import Iterator
from sas7bdat import SAS7BDAT

from odo.backends.sas import discover, sas_to_iterator
from odo.utils import tmpfile, into_path
from odo import append, convert, resource, dshape


test_path = into_path('backends', 'tests', 'airline.sas7bdat')
sasfile = SAS7BDAT(test_path)

columns = ("DATE", "AIR", "mon1", "mon2", "mon3", "mon4", "mon5", "mon6",
           "mon7", "mon8", "mon9", "mon10", "mon11", "mon12", "t", "Lair")

ds = dshape('''var * {DATE: date, AIR: float64, mon1: float64, mon2: float64,
                      mon3: float64, mon4: float64, mon5: float64, mon6: float64,
                      mon7: float64, mon8: float64, mon9: float64, mon10: float64,
                      mon11: float64, mon12: float64, t: float64, Lair: float64}''')


def test_resource_sas7bdat():
    assert isinstance(resource(test_path), SAS7BDAT)


def test_discover_sas():
    assert discover(sasfile) == ds


def test_convert_sas_to_dataframe():
    df = convert(pd.DataFrame, sasfile)
    assert isinstance(df, pd.DataFrame)
    # pandas doesn't support date
    expected = str(ds.measure).replace('date', 'datetime')
    assert str(discover(df).measure).replace('?', '') == expected


def test_convert_sas_to_list():
    out = convert(list, sasfile)
    assert isinstance(out, list)
    assert not any(isinstance(item, str) for item in out[0])  # No header
    assert all(isinstance(ln, list) for ln in out)


def test_convert_sas_to_iterator():
    itr = sas_to_iterator(sasfile)
    assert isinstance(itr, Iterator)


def test_append_sas_to_sqlite_round_trip():
    expected = convert(set, sasfile)

    with tmpfile('db') as fn:
        r = resource('sqlite:///%s::SAS' % fn, dshape=discover(sasfile))
        append(r, sasfile)
        result = convert(set, r)

    assert expected == result
bsd-3-clause
leesavide/pythonista-docs
Documentation/matplotlib/mpl_examples/pylab_examples/fill_betweenx_demo.py
12
1576
import matplotlib.mlab as mlab
from matplotlib.pyplot import figure, show
import numpy as np

## Copy of fill_between.py but using fill_betweenx() instead.

x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)

fig = figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)

ax1.fill_betweenx(x, 0, y1)
ax1.set_ylabel('between y1 and 0')

ax2.fill_betweenx(x, y1, 1)
ax2.set_ylabel('between y1 and 1')

ax3.fill_betweenx(x, y1, y2)
ax3.set_ylabel('between y1 and y2')
ax3.set_xlabel('x')

# now fill between y1 and y2 where a logical condition is met.  Note
# this is different than calling
#   fill_between(x[where], y1[where], y2[where])
# because of edge effects over multiple contiguous regions.
fig = figure()
ax = fig.add_subplot(211)
ax.plot(y1, x, y2, x, color='black')
ax.fill_betweenx(x, y1, y2, where=y2>=y1, facecolor='green')
ax.fill_betweenx(x, y1, y2, where=y2<=y1, facecolor='red')
ax.set_title('fill between where')

# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
ax1 = fig.add_subplot(212, sharex=ax)
ax1.plot(y1, x, y2, x, color='black')
ax1.fill_betweenx(x, y1, y2, where=y2>=y1, facecolor='green')
ax1.fill_betweenx(x, y1, y2, where=y2<=y1, facecolor='red')
ax1.set_title('Now regions with y2 > 1 are masked')

# This example illustrates a problem; because of the data
# gridding, there are undesired unfilled triangles at the crossover
# points.  A brute-force solution would be to interpolate all
# arrays to a very fine grid before plotting.
show()
apache-2.0
timqian/sms-tools
lectures/4-STFT/plots-code/window-size.py
22
1498
import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF

(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')

N = 128
start = .81*fs
x1 = x[start:start+N]
plt.figure(1, figsize=(9.5, 6))
plt.subplot(321)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x1*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x1, M = 128')

mX, pX = DF.dftAnal(x1, np.hamming(N), N)

plt.subplot(323)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX1')

plt.subplot(325)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX1')

N = 1024
start = .81*fs
x2 = x[start:start+N]
mX, pX = DF.dftAnal(x2, np.hamming(N), N)

plt.subplot(322)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x2*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x2), max(x2)])
plt.title('x2, M = 1024')

plt.subplot(324)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX2')

plt.subplot(326)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX2')

plt.tight_layout()
plt.savefig('window-size.png')
plt.show()
agpl-3.0
ritviksahajpal/LUH2
LUH2/GlobalCropRotations/crop_stats.py
1
44843
import logging import os import pdb import sys import matplotlib.pyplot as plt import numpy as np import pandas as pd import pygeoutil.util as util import GLM.constants as constants_glm import constants import plots reload(sys) sys.setdefaultencoding('utf-8') pd.options.mode.chained_assignment = None # default='warn' # Logging cur_flname = os.path.splitext(os.path.basename(__file__))[0] LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt' util.make_dir_if_missing(constants.log_dir) logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w', format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL # Output to screen logger = logging.getLogger(cur_flname) logger.addHandler(logging.StreamHandler(sys.stdout)) class CropStats: """ 1. read_raw_FAO_data: 1. Read in data on raw FAO crop acreages globally. 2. Delete redundant data, data from continents/regions 3. Replace NaNs by 0 2. read_crop_lup: Read lookup table of crops 3. plot_top_crops_by_area: Plot top crops based on how much global ag area they occupy 4. plot_top_countries_by_crops: Plot top countries based on how much global ag area they occupy 5. merge_FAO_CFT: Merge FAO data (raw or processed) with our crop functional type definitions 6. belgium_luxembourg: FAO data separates Belgium and Luxembourg starting in 2000, but before that it treats them as a single country called Belgium-Luxembourg. Here, we compute separate crop area values for Belgium and Luxembourg. We do this by computing the average fraction of crop area in each country (based on data from 2000 onwards), and applying this fraction to the previous years data. 7. merge_countries: Merge country data 8. FAO_ID_concordance: Merge with csv file containing concordance for FAO id's 9. extend_FAO_time: Extend FAO dataframe containing crop averages in time 10. output_cft_perc_to_nc: Output a dataframe containing percentage of CFT for each country for each year into a netCDF 11. output_cft_perc_to_csv: Output a dataframe containing percentage of CFT for each country (FAO) into csv 12. process_crop_stats: 1. Divide Belgium_Luxembourg values into Belgium and Luxembourg separately 2. Unite several countries into USSR, Yugoslavia, Ethiopia, China and Indonesia respectively 3. Create a dataframe extending from past to present (super set of 1961-present FAO period) """ def __init__(self): self.fao_file = util.open_or_die(constants.RAW_FAO) # Initialize names of columns self.country_code = 'Country Code' # Numeric values from 1 - ~5817, gives a unique code for country and region self.FAO_code = 'Country_FAO' self.ISO_code = 'ISO' # ISO code for each country, not the same as country_code. ISO_code is used in HYDE self.crop_name = 'Item' # Name of crop e.g. Wheat self.crop_id = 'Item Code' # Id of crop e.g. 1, 2 etc. self.cft_id = 'functional crop id' # crop functional type id e.g. 1 self.cft_type = 'functional crop type' # crop functional type e.g. 
C3Annual # Names of columns in past and future self.cur_cols = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)] # 850 -> 1960 self.past_cols = ['Y' + str(x) for x in range(constants.GLM_STRT_YR, constants.FAO_START_YR)] # 850 -> 1960 if constants.FAO_END_YR < constants.GLM_END_YR: self.futr_cols = ['Y' + str(x) for x in range(constants.FAO_END_YR + 1, constants.GLM_END_YR + 1)] # 2014 -> 2015 else: self.futr_cols = [] self.all_cols = self.past_cols + self.cur_cols + self.futr_cols self.FAO_perc_all_df = pd.DataFrame() # Dataframe containing FAO data for entire time-period self.FAO_mfd_df = pd.DataFrame() # Dataframe containing CFT percentage data for each country for the year 2000 # area_df: Area of CFT for each country in FAO era # gcrop: Percentage of ag area per CFT # gcnt: Percentage of ag area per country # perc_df: Percentage of ag area for each CFT by country in FAO era self.area_df = pd.DataFrame() self.gcrop = pd.DataFrame() self.gcnt = pd.DataFrame() self.perc_df = pd.DataFrame() # Path to csv of crop rotations self.csv_rotations = constants.csv_rotations self.dict_cont = {0: 'Antartica', 1: 'North_America', 2: 'South_America', 3: 'Europe', 4: 'Asia', 5: 'Africa', 6: 'Australia'} self.CCODES = 'country code' self.CONT_CODES = 'continent code' # continent and country code files self.ccodes_file = constants_glm.ccodes_file self.contcodes_file = constants_glm.contcodes_file def read_raw_FAO_data(self): """ 1. Read in data on raw FAO crop acreages globally. 2. Delete redundant data, data from continents/regions 3. Replace NaNs by 0 :return: dataframe """ logger.info('read_raw_FAO_data') df = self.fao_file.parse(constants.RAW_FAO_SHT) # Drop rows of type Y1961F...Y2013F. They are redundant. Actual data is in rows # with names like Y1961...Y2013 drop_rows = ['Y' + str(x) + 'F' for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)] for row in drop_rows: df.drop(row, axis=1, inplace=True) # Keep only countries, drop data from regions # Regions are groups of countries or continents df.drop(df[df[self.country_code] >= constants.FAO_REGION_CODE].index, inplace=True) df.fillna(0, inplace=True) return df def read_crop_lup(self, fname='', sht_name=''): """ Read lookup table of crops :return: """ logger.info('read_crop_lup') crp_file = util.open_or_die(fname) return crp_file.parse(sht_name) def plot_top_crops_by_area(self, grp_crop, col_name='', xlabel=''): """ Plot top crops based on how much global ag area they occupy :param grp_crop: :param col_name: :param xlabel: :return: """ logger.info('plot_top_crops_by_area') cols = plots.get_colors() ax = grp_crop[col_name][:constants.PLOT_CROPS].plot(kind='barh', color=cols[0], linewidth=0) # Remove spines from top and right of plot ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) # Remove y-axis label ax.set_ylabel('') ax.set_xlabel(xlabel) # Ensure that the axis ticks only show up on the bottom and left of the plot ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() # Final layout adjustment and output plt.tight_layout() plt.savefig(constants.out_dir + 'crops_by_area.png', dpi=constants.DPI) plt.close() def plot_top_countries_by_crops(self, grp_cnt, col_name='', xlabel=''): """ Plot top countries based on how much global ag area they occupy :param grp_cnt: :param col_name: :param xlabel: :return: """ logger.info('plot_top_countries_by_crops') cols = plots.get_colors() # Number of countries to plot is given by constants.PLOT_CNTRS ax = 
grp_cnt[col_name][:constants.PLOT_CNTRS].plot(kind='barh', color=cols[0], linewidth=0) # Remove spines from top and right of plot ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) # Remove y-axis label ax.set_ylabel('') ax.set_xlabel(xlabel) # Ensure that the axis ticks only show up on the bottom and left of the plot ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() # Final layout adjustment and output plt.tight_layout() plt.savefig(constants.out_dir + 'cntrs_by_area.png', dpi=constants.DPI) plt.close() def plot_stacked_country_cft(self, df, arr_legend, path_out, fname, ncols=2, xlabel='', ylabel='', title=''): """ :param df: :param arr_legend: :param path_out: :param fname: :param xlabel: :param ylabel: :param title: :return: """ df = (df .reset_index() .fillna(0.0)) ax = (df .plot .bar(stacked=True, colormap=plots.get_colors(palette='tableau', cmap=True), linewidth=0, use_index=False)) # Legend leg = ax.legend(fancybox=None, ncol=ncols, prop={'size': 6}) leg.get_frame().set_linewidth(0.0) leg.get_frame().set_alpha(0.5) # Create nice-looking grid for ease of visualization ax.grid(which='minor', alpha=0.2, linestyle='--') ax.grid(which='major', alpha=0.5, linestyle='--') plt.xticks(df.index, df[self.FAO_code]) plt.ylabel(ylabel) plt.xlabel(xlabel) plt.title(title) plt.tight_layout() plt.savefig(path_out + os.sep + fname, dpi=constants.DPI) plt.close() def get_stacked_df_FAO_by_CFT(self, processed_fao_df): """ Output stacked plot showing for top 10 countries by cropland area (averaged from 1961 - present) Each stack includes crop functional type area by country :param processed_fao_df: :return: """ logger.info('get_stacked_df_FAO_by_CFT') # Change from FAO indices to FAO ID's used in wood harvesting processed_fao_df = self.FAO_ID_concordance(processed_fao_df, mfd_yr=2000) # Read a lookup table that translates from crop type e.g. maize to crop functional type e.g. 
C4 Annual crop_df = self.read_crop_lup(fname=constants.CROP_LUP, sht_name=constants.CROP_LUP_SHT) # Merge processed FAO data and crop lookup table fao_df = pd.merge(processed_fao_df, crop_df, on=[self.crop_id, self.crop_id]) # Get top 10 countries, plot top 5 crops for each country top10 = fao_df.groupby(self.FAO_code, sort=False).mean_area.sum().nlargest(10).index df_top_crop_stacked = fao_df[fao_df[self.FAO_code].isin(top10)] df_top_crop_stacked['country_total'] = df_top_crop_stacked.groupby([self.FAO_code]).mean_area.transform(sum) df_top_crop_stacked = (df_top_crop_stacked[[self.FAO_code, self.crop_name, 'country_total', 'mean_area']] .sort_values([self.FAO_code, 'mean_area'], ascending=False) .groupby(self.FAO_code) .head(5) .sort_values(['country_total', 'mean_area'], ascending=False) .reset_index() .drop(['country_total', 'index'], axis=1)) unique_cfts = df_top_crop_stacked[self.crop_name].unique() df_top_crop_stacked = df_top_crop_stacked.pivot(index=self.FAO_code, columns=self.crop_name) # Plot stacked plot for top 10 countries by cropland area (averaged from 1961 - present) self.plot_stacked_country_cft(df_top_crop_stacked, arr_legend=unique_cfts, path_out=constants.out_dir, fname='stacked_crop_and_country.png', ncols=3, xlabel='', title=r'$Cropland\ area\ by\ functional\ type\ and\ country$', ylabel=r'$Area\ (km^{2})$') # Create a dataframe in descending order of sum of mean_area for all CFTs by country df_stacked = (fao_df[[self.FAO_code, self.cft_type, 'mean_area']] .groupby([self.FAO_code, self.cft_type]) .mean() .fillna(0) .reset_index()) # Make stacked plot for top 10 countries by cropland area (averaged from 1961 - present) df_top_stacked = (df_stacked .loc[df_stacked[self.FAO_code] .isin(df_stacked.groupby(self.FAO_code).sum().nlargest(10, 'mean_area').index)]) unique_cfts = df_top_stacked[self.cft_type].unique() df_top_stacked = df_top_stacked.pivot(index=self.FAO_code, columns=self.cft_type) # Plot stacked plot for top 10 countries by cropland area (averaged from 1961 - present) self.plot_stacked_country_cft(df_top_stacked, arr_legend=unique_cfts, path_out=constants.out_dir, fname='stacked_crop_by_cft_and_country.png', xlabel='', title=r'$Cropland\ area\ by\ functional\ type\ and\ country$', ylabel=r'$Area\ (km^{2})$') return df_stacked def merge_FAO_CFT(self, processed_fao_df): """ Merge FAO data (raw or processed) with our crop functional type definitions :param processed_fao_df: FAO data (raw or processed) :return:Combines our crop functional type definitions with FAO data """ logger.info('merge_FAO_CFT') # Change from FAO indices to FAO ID's used in wood harvesting processed_fao_df = self.FAO_ID_concordance(processed_fao_df, mfd_yr=2000) # Read a lookup table that translates from crop type e.g. maize to crop functional type e.g. 
C4 Annual crop_df = self.read_crop_lup(fname=constants.CROP_LUP, sht_name=constants.CROP_LUP_SHT) # Merge processed FAO data and crop lookup table fao_df = pd.merge(processed_fao_df, crop_df, on=[self.crop_id, self.crop_id]) # Select subset df with only Item, Country code and area sum # Item refers to crop name # mean_area is sum of all area from constants.FAO_START_YR to constants.FAO_END_YR for a given crop in a country sub_df = fao_df[[self.FAO_code, self.crop_name, 'mean_area']] # Compute a dataframe with data on what percentage of global crop area is occupied by each CROP grp_crop = sub_df.groupby(self.crop_name).sum().sort_values(by='mean_area', ascending=False) # Percent of all crop area grp_crop['pct'] = sub_df.groupby(self.crop_name).sum()*100.0/sub_df.groupby(self.crop_name).sum().sum() # Compute a dataframe with data on what percentage of global crop area is occupied by each COUNTRY grp_cnt = sub_df.groupby(self.FAO_code).sum().sort_values(by='mean_area', ascending=False) # Percent of all country area grp_cnt['pct'] = sub_df.groupby(self.FAO_code).sum()*100.0/sub_df.groupby(self.FAO_code).sum().sum() # Compute a dataframe subset by country and crop cols = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)] cols.extend([self.FAO_code, self.ISO_code, self.cft_id, self.cft_type]) out_df = fao_df[cols] per_df = pd.DataFrame() for yr in ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR+1)]: grp_df = out_df.groupby([self.FAO_code, self.ISO_code, self.cft_id, self.cft_type]).agg({yr: 'sum'}) grp_df.fillna(0.0, inplace=True) pct_df = grp_df.groupby(level=0).apply(lambda x: 100*x/float(x.sum())) per_df = pd.concat([per_df, pct_df], axis=1, join='inner') per_df.reset_index(inplace=True) # process FAO data so that the crop types are aggregated to our crop functional types yrs = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1, 1)] proc_fao_df = fao_df.groupby([self.ISO_code, self.FAO_code, self.cft_type])[yrs].sum() proc_fao_df.reset_index(inplace=True) return proc_fao_df, grp_crop, grp_cnt, per_df def belgium_luxembourg(self): """ FAO data separates Belgium and Luxembourg starting in 2000, but before that it treats them as a single country called Belgium-Luxembourg. Here, we compute separate crop area values for Belgium and Luxembourg. We do this by computing the average fraction of crop area in each country (based on data from 2000 onwards), and applying this fraction to the previous years data. :return: """ logger.info('belgium_luxembourg') code_bel = 255 # FAO country code for Belgium code_lux = 256 # FAO country code for Luxembourg code_blx = 15 # FAO country code for Belgium_Luxembourg fao_df = self.read_raw_FAO_data() belgium = fao_df[fao_df[self.country_code] == code_bel] luxembg = fao_df[fao_df[self.country_code] == code_lux] bel_lux = fao_df[fao_df[self.country_code] == code_blx] # Keep the year and Item columns. Item gives the crop code, which we use later to loop over. 
belgium = belgium.filter(regex=r'^Y\d{4}$|^Item$') luxembg = luxembg.filter(regex=r'^Y\d{4}$|^Item$') bel_lux = bel_lux.filter(regex=r'^Y\d{4}$|^Item$') # Replace all 0 values with Nan, makes it easier to replace them later fao_df = fao_df[fao_df != 0] # Loop over all crops for idx, crop in enumerate(belgium[self.crop_name]): # Extract row for a specific crop vals_belgium = belgium[belgium[self.crop_name] == crop].values vals_luxembg = luxembg[luxembg[self.crop_name] == crop].values vals_bel_lux = bel_lux[bel_lux[self.crop_name] == crop] vals_bel_lux = vals_bel_lux.drop(self.crop_name, axis=1) # Compute ratio based on non-zero values. frac is the fraction of cropland area going to Belgium. Therefore # (1 - frac) is the fraction of cropland area going to Luxembourg. if len(vals_belgium) == 0: frac = 0 elif len(vals_luxembg) == 0: frac = 1 else: sum_belgium = vals_belgium[0][1:].astype(float).sum() sum_luxembg = vals_luxembg[0][1:].astype(float).sum() frac = sum_belgium / (sum_belgium + sum_luxembg) # Compute values to be used to fill Belgium and Luxembourg rows if len(vals_bel_lux) > 0: fill_bel = pd.Series((vals_bel_lux.values * frac).squeeze()) fill_lux = pd.Series((vals_bel_lux.values * (1.0 - frac)).squeeze()) else: # For some crops, we do not have Belgium-Luxembourg data e.g. Aubergines, for such crops, just use # the individual country data else fill with 0's if len(vals_belgium) > 0: fill_bel = pd.Series(vals_belgium[0][1:].astype(float)) else: fill_bel = pd.Series(np.zeros(constants.FAO_END_YR - constants.FAO_START_YR + 1)) if len(vals_luxembg) > 0: fill_lux = pd.Series(vals_luxembg[0][1:].astype(float)) else: fill_lux = pd.Series(np.zeros(constants.FAO_END_YR - constants.FAO_START_YR + 1)) # Add a new index to each time series for Belgium and Luxembourg yrs = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1, 1)] fill_bel.index = yrs fill_lux.index = yrs # Replace the value for Belgium for specified crop fao_df[(fao_df[self.crop_name] == crop) & (fao_df[self.country_code] == code_bel)] = \ fao_df[(fao_df[self.crop_name] == crop) & (fao_df[self.country_code] == code_bel)].fillna(fill_bel) # Replace the value for Luxembourg for specified crop fao_df[(fao_df[self.crop_name] == crop) & (fao_df[self.country_code] == code_lux)] = \ fao_df[(fao_df[self.crop_name] == crop) & (fao_df[self.country_code] == code_lux)].fillna(fill_lux) # Drop Belgium_Luxembourg i.e country code 15 fao_df = fao_df[fao_df[self.country_code] != code_blx] return fao_df def merge_countries(self, df, replace_cnt=-1, new_iso=-1, cnt_list=[]): """ Merge country data :param df: dataframe :param replace_cnt: ID of country which will be changed to new_iso :param new_iso: New ISO(ID) :param cnt_list: Countries that will be merged :return: dataframe containing merged countries """ logger.info('merge_countries') # For each crop, add all the rows from the cnt_list list and replace existing row in replace_cnt all_df = df[df[self.country_code].isin(cnt_list)] for idx, crop in enumerate(all_df[self.crop_name]): s = all_df[(all_df[self.crop_name] == crop)].filter(regex=r'^Y\d{4}$').sum() df.loc[(df[self.country_code] == replace_cnt) & (df[self.crop_name] == crop), s.index] = \ pd.DataFrame(columns=s.index, data=s.values.reshape(1, len(s.index))).values # Get list of countries which will be dropped cnt_list.remove(replace_cnt) # Drop countries which have been aggregated and are no longer needed for cntry in cnt_list: df = df[df[self.country_code] != cntry] # Rename replace_cnt as new_iso if new_iso > 
0: df.loc[df[self.country_code] == replace_cnt, self.country_code] = new_iso return df def FAO_ID_concordance(self, cond_df, mfd_yr=2000): """ Merge with csv file containing concordance for FAO id's :param cond_df: :param mfd_yr: :return: """ logger.info('FAO_ID_concordance') fao_id = pd.read_csv(constants.FAO_CONCOR) cond_df = pd.merge(cond_df, fao_id, how='outer', left_on=self.country_code, right_on='ID') # Drop rows which have NA's in ISO column cond_df.dropna(subset=[self.ISO_code], inplace=True) # Replace NA's by 0 in valid rows and compute sum of area in each row # valid rows have names like Y1961...Y2013 vld_rows = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)] if mfd_yr == -1: # Compute sum of each row i.e. get sum of crop areas total across time for each country cond_df['mean_area'] = cond_df[vld_rows].mean(axis=1) else: cond_df['mean_area'] = cond_df['Y'+str(mfd_yr)] return cond_df def extend_FAO_time(self, per_df): """ Extend FAO dataframe containing crop averages in time i.e. use FAO averages from 1961-201x to fill in values for remaining years :param per_df: Dataframe containing percentage of CFTs in each year for each country :return: dataframe, no side-effect """ logger.info('extend_FAO_time') # Compute average of first CFT_FRAC_YR years worth of FAO data and use it to fill information from GLM_STRT_YR # to start of FAO era -> Y1961 - Y1965 cols = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_START_YR + constants.CFT_FRAC_YR)] # TODO HACK alert # Virgin Islands (U.S.) do not have 100% crops across the FAO period (1961-2013). As a result, the average of # all CFTs for Virgin Islands (U.S.) sums up to less than 0. Manually changing it to 100. # Should get smarter about detecting and fixing such anomalies. Perhaps fill in global data instead per_df.ix[per_df[self.FAO_code] == 'Virgin Isl. (US.)', cols] = 100.0 # TODO HACK alert # Czechoslovakia has 0 ha in croplands for all of the CFT types from 1993 onwards. Messes up netCDF, so fixing # this by assigning values from the year 1992 to all years after that czech_yr = 1992 czech_cols = ['Y' + str(x) for x in range(czech_yr + 1, constants.FAO_END_YR + 1)] # Y1993 - Y2013 per_df.ix[(per_df[self.FAO_code] == 'Czechoslovakia'), czech_cols] = \ per_df.ix[(per_df[self.FAO_code] == 'Czechoslovakia'), 'Y' + str(czech_yr)] cols.extend([self.FAO_code]) past_avg_df = per_df[cols] past_avg_df['mean_CFT'] = past_avg_df.mean(axis=1) # Average entire column # Compute average of last CFT_FRAC_YR years worth of FAO data and use it to fill information from END_YR to # GLM_END_YR cols = ['Y' + str(x) for x in range(constants.FAO_END_YR - constants.CFT_FRAC_YR, constants.FAO_END_YR + 1)] # Y2008 - Y2013 cols.extend([self.FAO_code]) futr_avg_df = per_df[cols] futr_avg_df['mean_CFT'] = futr_avg_df.mean(axis=1) # Average entire column # Extend dataframe in past and future all_df = pd.DataFrame() for col in [self.FAO_code, self.ISO_code, self.cft_id, self.cft_type]: if col in per_df.columns: all_df[col] = per_df.loc[:, col] # Join past columns for col in self.past_cols: all_df[col] = past_avg_df['mean_CFT'] # Join current columns for col in self.cur_cols: all_df[col] = per_df[col] # Join future columns if any for col in self.futr_cols: all_df[col] = futr_avg_df['mean_CFT'] return all_df def constant_FAO_time(self, per_df): """ Extend FAO dataframe containing crop averages in time i.e. 
use FAO averages from 1961-201x to fill in values for all year CFT fraction of any country will stay constant in time :param per_df: Dataframe containing percentage of CFTs in each year for each country :return: dataframe, no side-effect """ logger.info('constant_FAO_time') # Compute average of first CFT_FRAC_YR years worth of FAO data and use it to fill information from GLM_STRT_YR # to GLM_END_YR cols = ['Y' + str(x) for x in range(constants.FAO_START_YR, constants.FAO_END_YR + 1)] # Y1961 - Y2013 # TODO HACK alert # Virgin Islands (U.S.) do not have 100% crops across the FAO period (1961-2013). As a result, the average of # all CFTs for Virgin Islands (U.S.) sums up to less than 0. Manually changing it to 100. # Should get smarter about detecting and fixing such anomalies. Perhaps fill in global data instead per_df.ix[per_df[self.FAO_code] == 'Virgin Isl. (US.)', cols] = 100.0 # TODO HACK alert # Czechoslovakia has 0 ha in croplands for all of the CFT types from 1993 onwards. Messes up netCDF, so fixing # this by assigning values from the year 1992 to all years after that czech_yr = 1992 czech_cols = ['Y' + str(x) for x in range(czech_yr + 1, constants.FAO_END_YR + 1)] # Y1993 - Y2013 per_df.ix[(per_df[self.FAO_code] == 'Czechoslovakia'), czech_cols] = \ per_df.ix[(per_df[self.FAO_code] == 'Czechoslovakia'), 'Y' + str(czech_yr)] cols.extend([self.FAO_code]) past_avg_df = per_df[cols] past_avg_df['mean_CFT'] = past_avg_df.mean(axis=1) # Average entire column # Extend dataframe in past and future all_df = pd.DataFrame() for col in [self.FAO_code, self.ISO_code, self.cft_id, self.cft_type]: if col in per_df.columns: all_df[col] = per_df.loc[:, col] for col in self.all_cols: all_df[col] = past_avg_df['mean_CFT'] return all_df def output_cft_frac_to_nc(self, df, nc_name): """ Output a dataframe containing fraction of CFT for each country for each year into a netCDF :param df: :return: """ logger.info('output_cft_frac_to_nc') # Get list of ALL country codes fao_id = pd.read_csv(constants.FAO_CONCOR) all_cntrs = fao_id[self.ISO_code].unique() # Create a lookup dictionary between crop ids (1,2 etc.) and crop names (wheat etc.) 
crp_ids = df[self.cft_id].unique().tolist() crp_names = df[self.cft_type].unique().tolist() dict_crps = dict(zip(crp_ids, crp_names)) # Compute global average of all CFT area percentages (one average value for each CFT) cols = self.all_cols[:] cols.extend([self.cft_id]) global_cft_avg = df[cols].groupby(self.cft_id).sum()*100.0/df[cols].groupby(self.cft_id).sum().sum() global_cft_avg = global_cft_avg.mean(axis=1) # Read in HYDE dataset to get lat, lon info ds = util.open_or_die(constants.hyde_dir) tme = ds.variables['time'][:] onc = util.open_or_die(constants.out_dir + nc_name, 'w') # Create dimensions onc.createDimension('time', np.shape(tme)[0]) onc.createDimension('country_code', len(all_cntrs)) # Create variables time = onc.createVariable('time', 'i4', ('time',)) cntrs = onc.createVariable('country_code', 'i4', ('country_code',)) # Assign time time[:] = tme # Metadata cntrs.units = '' cntrs.standard_name = 'FAO country codes' # Assign data to countries cntrs[:] = all_cntrs all = onc.createVariable('sum', 'f4', ('time', 'country_code', ), fill_value=np.nan) all[:, :] = np.zeros((np.shape(tme)[0], len(all_cntrs))) # Assign data for crop functional types for key, val in dict_crps.iteritems(): cft = onc.createVariable(val, 'f4', ('time', 'country_code', ), fill_value=np.nan) cft.units = 'fraction' # Iterate over all countries over all years for idc, i in enumerate(all_cntrs): # Check if country is present in dataframe cntr_present = i in df[self.ISO_code].values # If country is present, then fill CFT values if cntr_present: # Get data corresponding to country code 'i' and crop id 'val' and for all years (all_cols) vals = df[(df[self.ISO_code] == i) & (df[self.cft_type] == val)][self.all_cols].values # If CFT data is missing, then vals will be an empty array. # In that case, fill with 0.0 if len(vals) == 0: vals = np.zeros((1, len(tme))) else: # country data not present, fill with global average vals = np.repeat(global_cft_avg[global_cft_avg.index == key].values, len(tme)) if constants.TEST_CFT: # Assign to each of the 5 CFTs, a value of 1 / 5.0 or 20% vals = np.empty(len(tme)) vals.fill(20.0) cft[:, idc] = vals.T/100.0 all[:, idc] = all[:, idc] + vals.T/100.0 else: cft[:, idc] = vals.T/100.0 # Convert from percentage to fraction all[:, idc] = all[:, idc] + cft[:, idc] onc.close() def output_constant_cft_frac_to_nc(self, df, nc_name): """ Create a netCDF with constant CFT fraction values for each country across time :return: """ logger.info('output_constant_cft_frac_to_nc') # Get list of ALL country codes fao_id = pd.read_csv(constants.FAO_CONCOR) all_cntrs = fao_id[self.ISO_code].unique() # Create a lookup dictionary between crop ids (1,2 etc.) and crop names (wheat etc.) 
crp_ids = df[self.cft_id].unique().tolist() crp_names = df[self.cft_type].unique().tolist() dict_crps = dict(zip(crp_ids, crp_names)) # Compute global average of all CFT area percentages (one average value for each CFT) cols = self.all_cols[:] cols.extend([self.cft_id]) global_cft_avg = df[cols].groupby(self.cft_id).sum()*100.0/df[cols].groupby(self.cft_id).sum().sum() global_cft_avg = global_cft_avg.mean(axis = 1) # Read in HYDE dataset to get lat, lon info ds = util.open_or_die(constants.hyde_dir) tme = ds.variables['time'][:] onc = util.open_or_die(constants.out_dir + nc_name, 'w') # Create dimensions onc.createDimension('time', np.shape(tme)[0]) onc.createDimension('country_code', len(all_cntrs)) # Create variables time = onc.createVariable('time', 'i4', ('time',)) cntrs = onc.createVariable('country_code', 'i4', ('country_code',)) # Assign time time[:] = tme # Metadata cntrs.units = '' cntrs.standard_name = 'FAO country codes' # Assign data to countries cntrs[:] = all_cntrs all = onc.createVariable('sum', 'f4', ('time', 'country_code', ), fill_value=np.nan) all[:, :] = np.zeros((np.shape(tme)[0], len(all_cntrs))) # Assign data for crop functional types for key, val in dict_crps.iteritems(): cft = onc.createVariable(val, 'f4', ('time', 'country_code', ), fill_value=np.nan) cft.units = 'fraction' # Iterate over all countries over all years for idc, i in enumerate(all_cntrs): # Check if country is present in dataframe cntr_present = i in df[self.ISO_code].values # If country is present, then fill CFT values if cntr_present: # Get data corresponding to country code 'i' and crop id 'val' and for all years (all_cols) vals = df[(df[self.ISO_code] == i) & (df[self.cft_type] == val)][self.all_cols].values # If CFT data is missing, then vals will be an empty array. 
# In that case, fill with 0.0 if len(vals) == 0: vals = np.zeros((1, len(tme))) else: # country data not present, fill with global average vals = np.repeat(global_cft_avg[global_cft_avg.index == key].values, len(tme)) cft[:, idc] = vals.T/100.0 # Convert from percentage to fraction all[:, idc] = all[:, idc] + cft[:, idc] onc.close() def create_rotations_nc(self, df): """ :param df: Pandas dataframe Dataframe containing cropland area for each country x functional crop type combination Country_FAO functional crop type mean_area Albania C3annual 6.687115e+03 Albania C3perennial 4.139867e+03 Albania C4annual 5.300000e+04 Albania N-fixing 4.460714e+03 Algeria C3annual 5.371344e+04 :return: """ logger.info('create_rotations_nc') df_rotations = util.open_or_die(self.csv_rotations, csv_header=0) # Read in country and continent file # Create dataframe combining country and continent files ccodes = pd.read_csv(self.ccodes_file, header=None) ccodes.columns = [self.CCODES] contcodes = pd.read_csv(self.contcodes_file, header=None) contcodes.columns = [self.CONT_CODES] lup_codes = pd.concat([ccodes, contcodes], axis=1) # Merge dataframe df_merge = pd.merge(df_rotations, lup_codes, on=self.CCODES) out_nc = constants_glm.path_glm_output + os.sep + 'national_crop_rotation_data_850_2015_new.nc' nc_data = util.open_or_die(out_nc, perm='w', format='NETCDF4_CLASSIC') nc_data.description = '' # dimensions tme = np.arange(constants.GLM_STRT_YR, constants.GLM_END_YR + 1) nc_data.createDimension('time', np.shape(tme)[0]) nc_data.createDimension('country', len(ccodes)) # Populate and output nc file time = nc_data.createVariable('time', 'i4', ('time',), fill_value=0.0) country = nc_data.createVariable('country', 'i4', ('country',), fill_value=0.0) # Assign units and other metadata time.units = 'year as %Y.%f' time.calendar = 'proleptic_gregorian' country.units = 'ISO country code' # Assign values to dimensions and data time[:] = tme country[:] = ccodes.values c4ann_to_c3nfx = nc_data.createVariable('c4ann_to_c3nfx', 'f4', ('time', 'country',)) c4ann_to_c3ann = nc_data.createVariable('c4ann_to_c3ann', 'f4', ('time', 'country',)) c3ann_to_c3nfx = nc_data.createVariable('c3ann_to_c3nfx', 'f4', ('time', 'country',)) c4ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation' c4ann_to_c3nfx.long_name = 'Crop rotations: C4 Annual, C3 N-Fixing' c4ann_to_c3ann.units = 'fraction of crop type area undergoing crop rotation' c4ann_to_c3ann.long_name = 'Crop rotations: C4 Annual, C3 Annual' c3ann_to_c3nfx.units = 'fraction of crop type area undergoing crop rotation' c3ann_to_c3nfx.long_name = 'Crop rotations: C3 Annual, C3 N-Fixing' # Loop over all countries for index, row in lup_codes.iterrows(): # print index, row[self.CCODES], row[self.CONT_CODES] # Find row containing country in df_merge row_country = df_merge[df_merge[self.CCODES] == row[self.CCODES]] if len(row_country): c4ann_to_c3nfx[:, index] = row_country['c4ann_to_c3nfx'].values[0] c4ann_to_c3ann[:, index] = row_country['c4ann_to_c3ann'].values[0] c3ann_to_c3nfx[:, index] = row_country['c3ann_to_c3nfx'].values[0] else: # TODO Find the average crop rotation rate for the continent c4ann_to_c3nfx[:, index] = 0.03 # 0.53 c4ann_to_c3ann[:, index] = 0.01 c3ann_to_c3nfx[:, index] = 0.02 nc_data.close() def output_cft_area_to_df(self, df, yr=2000): """ Output FAO ag area for each CFT by country as a csv file :param df: :param yr: Which year to use from FAO? 
:return: """ logger.info('output_cft_area_to_df') list_df = [] # Get list of ALL country codes fao_id = pd.read_csv(constants.FAO_CONCOR) # column names: Country_FAO ID ISO all_cntrs = fao_id[self.ISO_code].unique() # .unique() is redundant here, but harmless # Get crop functional type names (C3Annual etc.) cft_names = df[self.cft_type].unique().tolist() # Create output csv of the form: # ISO Country_FAO Area_FAO FAO_c4annual FAO_c4perren FAO_c3perren FAO_ntfixing FAO_c3annual # 4 Afghanistan ... for idc, i in enumerate(all_cntrs): try: # Get name of country cnt_name = df[df[self.ISO_code] == i][self.FAO_code].unique()[0] # Get sum of area of cropland in country based on FAO data cnt_area = df[df[self.ISO_code] == i]['Y' + str(yr)].values.sum() except: # For countries that are missing in FAO data, fill areas as 0.0 cnt_name = fao_id[fao_id[self.ISO_code] == i][self.FAO_code].iloc[0] cnt_area = 0.0 # Add country ISO code, name and area to dictionary dict_all = {'ISO': int(i), 'Country_FAO': cnt_name, 'Area_FAO': cnt_area} # Get area of individual CFTs for each country for key in cft_names: try: area_sum = df[(df[self.ISO_code] == i) & (df[self.cft_type] == key)]['Y' + str(yr)].values.sum() except: # For countries that are missing in FAO data, fill areas as 0.0 area_sum = 0.0 dict_cft = {'FAO_'+key.lower(): area_sum} dict_all.update(dict_cft) # Add CFT area to dictionary list_df.append(dict_all) return pd.DataFrame(list_df) def process_crop_stats(self): """ 1. Divide Belgium_Luxembourg values into Belgium and Luxembourg separately 2. Unite several countries into USSR, Yugoslavia, Ethiopia, China and Indonesia respectively 3. Create a dataframe extending from past to present (super set of 1961-present FAO period) :return: Nothing, outputs dataframes """ logger.info('process_crop_stats') # Divide Belgium_Luxembourg values into Belgium and Luxembourg separately blx_fao_df = self.belgium_luxembourg() # Unite several countries into USSR ussr_vals = [1, 52, 57, 63, 73, 108, 113, 119, 126, 146, 185, 208, 213, 230, 228, 235] df = self.merge_countries(blx_fao_df, replace_cnt=228, new_iso=228, cnt_list=ussr_vals) # Unite several countries into Yugoslavia yugo_vals = [80, 98, 154, 186, 198, 248, 272, 273] df = self.merge_countries(df, replace_cnt=248, new_iso=248, cnt_list=yugo_vals) etop_vals = [62, 178, 238] df = self.merge_countries(df, replace_cnt=62, new_iso=62, cnt_list=etop_vals) chna_vals = [41, 96] df = self.merge_countries(df, replace_cnt=41, new_iso=41, cnt_list=chna_vals) insa_vals = [101, 176] df = self.merge_countries(df, replace_cnt=101, new_iso=101, cnt_list=insa_vals) # Merge FAO data on crop area distribution over all countries globally with # GLM's lookup table from crop names to crop functional type classifications # Fill all missing values. 
# area_df: Area of CFT for each country in FAO era # gcrop: Percentage of ag area per CFT # gcnt: Percentage of ag area per country # perc_df: Percentage of ag area for each CFT by country in FAO era self.area_df, self.gcrop, self.gcnt, self.perc_df = self.merge_FAO_CFT(df) # Output stacked plot showing for top 10 countries by cropland area (averaged from 1961 - present) # Each stack includes crop functional type area by country df_stacked = self.get_stacked_df_FAO_by_CFT(df) # create crop rotations netCDF file self.create_rotations_nc(df_stacked) # Fill in missing values using values from that row only self.area_df = self.area_df.fillna(axis=1, method='backfill') self.perc_df = self.perc_df.fillna(axis=1, method='backfill') # Fill in values that are still missing with 0.0 self.area_df = self.area_df.fillna(0.0) self.perc_df = self.perc_df.fillna(0.0) # 12/01/2015: Create a dataframe containing percentages of crop functional types for all countries from start of # paleo period (currently 850 AD) to end of current time-period (currently 2015) self.all_df = self.extend_FAO_time(self.area_df) # Extend from FAO era to entire time-period self.FAO_perc_all_df = self.extend_FAO_time(self.perc_df) # Get CFT percentage data for each country for the year 2000 i.e. year for which monfreda map was made self.FAO_mfd_df = self.output_cft_area_to_df(self.FAO_perc_all_df, yr=2000) def output_crop_stats(self): """ Output class members to csv or nc or plot them :return: """ logger.info('output_crop_stats') self.output_cft_frac_to_nc(self.extend_FAO_time(self.FAO_perc_all_df), nc_name='FAO_CFT_fraction.nc') self.output_constant_cft_frac_to_nc(self.constant_FAO_time(self.perc_df), 'FAO_CFT_constant_fraction.nc') csv_df = self.output_cft_area_to_df(self.all_df, yr=2000) # Plot plots.set_matplotlib_params() self.plot_top_crops_by_area(self.gcrop, col_name='pct', xlabel='% of global ag area by crop') self.plot_top_countries_by_crops(self.gcnt, col_name='pct', xlabel='% of global ag area by country') # Output to csv self.area_df.to_csv(constants.out_dir + os.sep + 'FAO_CFT_areas_subset.csv') # Only FAO era data csv_df.to_csv(constants.out_dir + os.sep + 'FAO_CFT_areas_all.csv') # From paleo period (850 A.D.) to now (2015) self.gcnt.to_csv(constants.out_dir + os.sep + 'country_ag.csv') self.all_df.to_csv(constants.out_dir + os.sep + 'all_crops.csv') self.FAO_mfd_df.to_csv(constants.out_dir + os.sep + 'FAO_monfreda_perc_CFT.csv') self.FAO_perc_all_df.to_csv(constants.out_dir + os.sep + 'FAO_perc_all_df.csv') if __name__ == '__main__': obj = CropStats() obj.process_crop_stats() obj.output_crop_stats()
mit
taohaoge/vincent
examples/map_examples.py
11
6721
# -*- coding: utf-8 -*-
"""
Vincent Map Examples
"""

#Build a map from scratch

from vincent import *

world_topo = r'world-countries.topo.json'
state_topo = r'us_states.topo.json'
lake_topo = r'lakes_50m.topo.json'
county_geo = r'us_counties.geo.json'
county_topo = r'us_counties.topo.json'
or_topo = r'or_counties.topo.json'

vis = Visualization(width=960, height=500)

vis.data['countries'] = Data(
    name='countries',
    url=world_topo,
    format={'type': 'topojson', 'feature': 'world-countries'}
    )

geo_transform = Transform(
    type='geopath', value="data", projection='winkel3', scale=200,
    translate=[480, 250]
    )

geo_from = MarkRef(data='countries', transform=[geo_transform])

enter_props = PropertySet(
    stroke=ValueRef(value='#000000'),
    path=ValueRef(field='path')
    )

update_props = PropertySet(fill=ValueRef(value='steelblue'))

mark_props = MarkProperties(enter=enter_props, update=update_props)

vis.marks.append(
    Mark(type='path', from_=geo_from, properties=mark_props)
    )

vis.to_json('vega.json')

#Convenience Method

geo_data = [{'name': 'countries',
             'url': world_topo,
             'feature': 'world-countries'}]

vis = Map(geo_data=geo_data, scale=200)
vis.to_json('vega.json')

#States & Counties

geo_data = [{'name': 'counties',
             'url': county_topo,
             'feature': 'us_counties.geo'},
            {'name': 'states',
             'url': state_topo,
             'feature': 'us_states.geo'}
            ]

vis = Map(geo_data=geo_data, scale=1000, projection='albersUsa')
del vis.marks[1].properties.update
vis.marks[0].properties.update.fill.value = '#084081'
vis.marks[1].properties.enter.stroke.value = '#fff'
vis.marks[0].properties.enter.stroke.value = '#7bccc4'
vis.to_json('vega.json')

#Choropleth
import json
import pandas as pd

#Map the county codes we have in our geometry to those in the
#county_data file, which contains additional rows we don't need
with open('us_counties.topo.json', 'r') as f:
    get_id = json.load(f)

#A little FIPS code munging
new_geoms = []
for geom in get_id['objects']['us_counties.geo']['geometries']:
    geom['properties']['FIPS'] = int(geom['properties']['FIPS'])
    new_geoms.append(geom)

get_id['objects']['us_counties.geo']['geometries'] = new_geoms

with open('us_counties.topo.json', 'w') as f:
    json.dump(get_id, f)

#Grab the FIPS codes and load them into a dataframe
geometries = get_id['objects']['us_counties.geo']['geometries']
county_codes = [x['properties']['FIPS'] for x in geometries]
county_df = pd.DataFrame({'FIPS': county_codes}, dtype=str)
county_df = county_df.astype(int)

#Read into Dataframe, cast to int for consistency
df = pd.read_csv('data/us_county_data.csv', na_values=[' '])
df['FIPS'] = df['FIPS'].astype(int)

#Perform an inner join, pad NA's with data from nearest county
merged = pd.merge(df, county_df, on='FIPS', how='inner')
merged = merged.fillna(method='pad')

geo_data = [{'name': 'counties',
             'url': county_topo,
             'feature': 'us_counties.geo'}]

vis = Map(data=merged, geo_data=geo_data, scale=1100, projection='albersUsa',
          data_bind='Employed_2011', data_key='FIPS',
          map_key={'counties': 'properties.FIPS'})
vis.marks[0].properties.enter.stroke_opacity = ValueRef(value=0.5)

#Change our domain for an even inteager
vis.scales['color'].domain = [0, 189000]

vis.legend(title='Number Employed 2011')
vis.to_json('vega.json')

#Lets look at different stats
vis.rebind(column='Civilian_labor_force_2011', brew='BuPu')
vis.to_json('vega.json')

vis.rebind(column='Unemployed_2011', brew='PuBu')
vis.to_json('vega.json')

vis.rebind(column='Unemployment_rate_2011', brew='YlGnBu')
vis.to_json('vega.json')

vis.rebind(column='Median_Household_Income_2011', brew='RdPu')
vis.to_json('vega.json')

#Mapping US State Level Data

state_data = pd.read_csv('data/US_Unemployment_Oct2012.csv')

geo_data = [{'name': 'states',
             'url': state_topo,
             'feature': 'us_states.geo'}]

vis = Map(data=state_data, geo_data=geo_data, scale=1000,
          projection='albersUsa', data_bind='Unemployment', data_key='NAME',
          map_key={'states': 'properties.NAME'})
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')

#Iterating State Level Data

yoy = pd.read_table('data/State_Unemp_YoY.txt', delim_whitespace=True)

#Standardize State names to match TopoJSON for keying
names = []
for row in yoy.iterrows():
    pieces = row[1]['NAME'].split('_')
    together = ' '.join(pieces)
    names.append(together.title())

yoy['NAME'] = names

geo_data = [{'name': 'states',
             'url': state_topo,
             'feature': 'us_states.geo'}]

vis = Map(data=yoy, geo_data=geo_data, scale=1000,
          projection='albersUsa', data_bind='AUG_2012', data_key='NAME',
          map_key={'states': 'properties.NAME'}, brew='YlGnBu')

#Custom threshold scale
vis.scales[0].type = 'threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')

#Rebind and set our scale again
vis.rebind(column='AUG_2013', brew='YlGnBu')
vis.scales[0].type = 'threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.to_json('vega.json')

vis.rebind(column='CHANGE', brew='YlGnBu')
vis.scales[0].type = 'threshold'
vis.scales[0].domain = [-1.5, -1.3, -1.1, 0, 0.1, 0.3, 0.5, 0.8]
vis.legends[0].title = "YoY Change in Unemployment (%)"
vis.to_json('vega.json')

#Oregon County-level population data

or_data = pd.read_table('data/OR_County_Data.txt', delim_whitespace=True)
or_data['July_2012_Pop'] = or_data['July_2012_Pop'].astype(int)

#Standardize keys
with open('or_counties.topo.json', 'r') as f:
    counties = json.load(f)


def split_county(name):
    parts = name.split(' ')
    parts.pop(-1)
    return ''.join(parts).upper()

#A little FIPS code munging
new_geoms = []
for geom in counties['objects']['or_counties.geo']['geometries']:
    geom['properties']['COUNTY'] = split_county(geom['properties']['COUNTY'])
    new_geoms.append(geom)

counties['objects']['or_counties.geo']['geometries'] = new_geoms

with open('or_counties.topo.json', 'w') as f:
    json.dump(counties, f)

geo_data = [{'name': 'states',
             'url': state_topo,
             'feature': 'us_states.geo'},
            {'name': 'or_counties',
             'url': or_topo,
             'feature': 'or_counties.geo'}]

vis = Map(data=or_data, geo_data=geo_data, scale=3700,
          translate=[1480, 830],
          projection='albersUsa', data_bind='July_2012_Pop',
          data_key='NAME', map_key={'or_counties': 'properties.COUNTY'})
vis.marks[0].properties.update.fill.value = '#c2c2c2'
vis.to_json('vega.json')
mit
alexsavio/scikit-learn
sklearn/ensemble/gradient_boosting.py
6
74941
"""Gradient Boosted Regression Trees This module contains methods for fitting gradient boosted regression trees for both classification and regression. The module structure is the following: - The ``BaseGradientBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ in the concrete ``LossFunction`` used. - ``GradientBoostingClassifier`` implements gradient boosting for classification problems. - ``GradientBoostingRegressor`` implements gradient boosting for regression problems. """ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti, # Arnaud Joly, Jacob Schreiber # License: BSD 3 clause from __future__ import print_function from __future__ import division from abc import ABCMeta from abc import abstractmethod from .base import BaseEnsemble from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import RegressorMixin from ..externals import six from ..feature_selection.from_model import _LearntSelectorMixin from ._gradient_boosting import predict_stages from ._gradient_boosting import predict_stage from ._gradient_boosting import _random_sample_mask import numbers import numpy as np from scipy import stats from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import issparse from time import time from ..tree.tree import DecisionTreeRegressor from ..tree._tree import DTYPE from ..tree._tree import TREE_LEAF from ..utils import check_random_state from ..utils import check_array from ..utils import check_X_y from ..utils import column_or_1d from ..utils import check_consistent_length from ..utils import deprecated from ..utils.extmath import logsumexp from ..utils.fixes import expit from ..utils.fixes import bincount from ..utils.stats import _weighted_percentile from ..utils.validation import check_is_fitted from ..utils.multiclass import check_classification_targets from ..exceptions import NotFittedError class QuantileEstimator(BaseEstimator): """An estimator predicting the alpha-quantile of the training targets.""" def __init__(self, alpha=0.9): if not 0 < alpha < 1.0: raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha) self.alpha = alpha def fit(self, X, y, sample_weight=None): if sample_weight is None: self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0) else: self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0) def predict(self, X): check_is_fitted(self, 'quantile') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.quantile) return y class MeanEstimator(BaseEstimator): """An estimator predicting the mean of the training targets.""" def fit(self, X, y, sample_weight=None): if sample_weight is None: self.mean = np.mean(y) else: self.mean = np.average(y, weights=sample_weight) def predict(self, X): check_is_fitted(self, 'mean') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.mean) return y class LogOddsEstimator(BaseEstimator): """An estimator predicting the log odds ratio.""" scale = 1.0 def fit(self, X, y, sample_weight=None): # pre-cond: pos, neg are encoded as 1, 0 if sample_weight is None: pos = np.sum(y) neg = y.shape[0] - pos else: pos = np.sum(sample_weight * y) neg = np.sum(sample_weight * (1 - y)) if neg == 0 or pos == 0: raise ValueError('y contains non binary labels.') self.prior = self.scale * np.log(pos / neg) def predict(self, X): check_is_fitted(self, 'prior') y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.prior) return y class 
ScaledLogOddsEstimator(LogOddsEstimator): """Log odds ratio scaled by 0.5 -- for exponential loss. """ scale = 0.5 class PriorProbabilityEstimator(BaseEstimator): """An estimator predicting the probability of each class in the training data. """ def fit(self, X, y, sample_weight=None): if sample_weight is None: sample_weight = np.ones_like(y, dtype=np.float64) class_counts = bincount(y, weights=sample_weight) self.priors = class_counts / class_counts.sum() def predict(self, X): check_is_fitted(self, 'priors') y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64) y[:] = self.priors return y class ZeroEstimator(BaseEstimator): """An estimator that simply predicts zero. """ def fit(self, X, y, sample_weight=None): if np.issubdtype(y.dtype, int): # classification self.n_classes = np.unique(y).shape[0] if self.n_classes == 2: self.n_classes = 1 else: # regression self.n_classes = 1 def predict(self, X): check_is_fitted(self, 'n_classes') y = np.empty((X.shape[0], self.n_classes), dtype=np.float64) y.fill(0.0) return y class LossFunction(six.with_metaclass(ABCMeta, object)): """Abstract base class for various loss functions. Attributes ---------- K : int The number of regression trees to be induced; 1 for regression and binary classification; ``n_classes`` for multi-class classification. """ is_multi_class = False def __init__(self, n_classes): self.K = n_classes def init_estimator(self): """Default ``init`` estimator for loss function. """ raise NotImplementedError() @abstractmethod def __call__(self, y, pred, sample_weight=None): """Compute the loss of prediction ``pred`` and ``y``. """ @abstractmethod def negative_gradient(self, y, y_pred, **kargs): """Compute the negative gradient. Parameters --------- y : np.ndarray, shape=(n,) The target labels. y_pred : np.ndarray, shape=(n,): The predictions. """ def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Update the terminal regions (=leaves) of the given tree and updates the current predictions of the model. Traverses tree and invokes template method `_update_terminal_region`. Parameters ---------- tree : tree.Tree The tree object. X : ndarray, shape=(n, m) The data array. y : ndarray, shape=(n,) The target labels. residual : ndarray, shape=(n,) The residuals (usually the negative gradient). y_pred : ndarray, shape=(n,) The predictions. sample_weight : ndarray, shape=(n,) The weight of each sample. sample_mask : ndarray, shape=(n,) The sample mask to be used. learning_rate : float, default=0.1 learning rate shrinks the contribution of each tree by ``learning_rate``. k : int, default 0 The index of the estimator being updated. """ # compute leaf for each sample in ``X``. terminal_regions = tree.apply(X) # mask all which are not in sample mask. masked_terminal_regions = terminal_regions.copy() masked_terminal_regions[~sample_mask] = -1 # update each leaf (= perform line search) for leaf in np.where(tree.children_left == TREE_LEAF)[0]: self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_weight) # update predictions (both in-bag and out-of-bag) y_pred[:, k] += (learning_rate * tree.value[:, 0, 0].take(terminal_regions, axis=0)) @abstractmethod def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Template method for updating terminal regions (=leaves). 
""" class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for regression loss functions. """ def __init__(self, n_classes): if n_classes != 1: raise ValueError("``n_classes`` must be 1 for regression but " "was %r" % n_classes) super(RegressionLossFunction, self).__init__(n_classes) class LeastSquaresError(RegressionLossFunction): """Loss function for least squares (LS) estimation. Terminal regions need not to be updated for least squares. """ def init_estimator(self): return MeanEstimator() def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.mean((y - pred.ravel()) ** 2.0) else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * ((y - pred.ravel()) ** 2.0))) def negative_gradient(self, y, pred, **kargs): return y - pred.ravel() def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Least squares does not need to update terminal regions. But it has to update the predictions. """ # update predictions y_pred[:, k] += learning_rate * tree.predict(X).ravel() def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): pass class LeastAbsoluteError(RegressionLossFunction): """Loss function for least absolute deviation (LAD) regression. """ def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.abs(y - pred.ravel()).mean() else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * np.abs(y - pred.ravel()))) def negative_gradient(self, y, pred, **kargs): """1.0 if y - pred > 0.0 else -1.0""" pred = pred.ravel() return 2.0 * (y - pred > 0.0) - 1.0 def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """LAD updates terminal regions to median estimates. """ terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0) tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50) class HuberLossFunction(RegressionLossFunction): """Huber loss function for robust regression. M-Regression proposed in Friedman 2001. References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. 
""" def __init__(self, n_classes, alpha=0.9): super(HuberLossFunction, self).__init__(n_classes) self.alpha = alpha self.gamma = None def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred gamma = self.gamma if gamma is None: if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma if sample_weight is None: sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / y.shape[0] else: sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * sample_weight[~gamma_mask] * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / sample_weight.sum() return loss def negative_gradient(self, y, pred, sample_weight=None, **kargs): pred = pred.ravel() diff = y - pred if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma residual = np.zeros((y.shape[0],), dtype=np.float64) residual[gamma_mask] = diff[gamma_mask] residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask]) self.gamma = gamma return residual def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) gamma = self.gamma diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) median = _weighted_percentile(diff, sample_weight, percentile=50) diff_minus_median = diff - median tree.value[leaf, 0] = median + np.mean( np.sign(diff_minus_median) * np.minimum(np.abs(diff_minus_median), gamma)) class QuantileLossFunction(RegressionLossFunction): """Loss function for quantile regression. Quantile regression allows to estimate the percentiles of the conditional distribution of the target. """ def __init__(self, n_classes, alpha=0.9): super(QuantileLossFunction, self).__init__(n_classes) assert 0 < alpha < 1.0 self.alpha = alpha self.percentile = alpha * 100.0 def init_estimator(self): return QuantileEstimator(self.alpha) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred alpha = self.alpha mask = y > pred if sample_weight is None: loss = (alpha * diff[mask].sum() + (1.0 - alpha) * diff[~mask].sum()) / y.shape[0] else: loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) + (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) / sample_weight.sum()) return loss def negative_gradient(self, y, pred, **kargs): alpha = self.alpha pred = pred.ravel() mask = y > pred return (alpha * mask) - ((1.0 - alpha) * ~mask) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) sample_weight = sample_weight.take(terminal_region, axis=0) val = _weighted_percentile(diff, sample_weight, self.percentile) tree.value[leaf, 0] = val class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for classification loss functions. 
""" def _score_to_proba(self, score): """Template method to convert scores to probabilities. the does not support probabilites raises AttributeError. """ raise TypeError('%s does not support predict_proba' % type(self).__name__) @abstractmethod def _score_to_decision(self, score): """Template method to convert scores to decisions. Returns int arrays. """ class BinomialDeviance(ClassificationLossFunction): """Binomial deviance loss function for binary classification. Binary classification is a special case; here, we only need to fit one tree instead of ``n_classes`` trees. """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(BinomialDeviance, self).__init__(1) def init_estimator(self): return LogOddsEstimator() def __call__(self, y, pred, sample_weight=None): """Compute the deviance (= 2 * negative log-likelihood). """ # logaddexp(0, v) == log(1.0 + exp(v)) pred = pred.ravel() if sample_weight is None: return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred)) else: return (-2.0 / sample_weight.sum() * np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred)))) def negative_gradient(self, y, pred, **kargs): """Compute the residual (= negative gradient). """ return y - expit(pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. our node estimate is given by: sum(w * (y - prob)) / sum(w * prob * (1 - prob)) we take advantage that: y - prob = residual """ terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = expit(score.ravel()) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class MultinomialDeviance(ClassificationLossFunction): """Multinomial deviance loss function for multi-class classification. For multi-class classification we need to fit ``n_classes`` trees at each stage. """ is_multi_class = True def __init__(self, n_classes): if n_classes < 3: raise ValueError("{0:s} requires more than 2 classes.".format( self.__class__.__name__)) super(MultinomialDeviance, self).__init__(n_classes) def init_estimator(self): return PriorProbabilityEstimator() def __call__(self, y, pred, sample_weight=None): # create one-hot label encoding Y = np.zeros((y.shape[0], self.K), dtype=np.float64) for k in range(self.K): Y[:, k] = y == k if sample_weight is None: return np.sum(-1 * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) else: return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) def negative_gradient(self, y, pred, k=0, **kwargs): """Compute negative gradient for the ``k``-th class. """ return y - np.nan_to_num(np.exp(pred[:, k] - logsumexp(pred, axis=1))) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. 
""" terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) numerator *= (self.K - 1) / self.K denominator = np.sum(sample_weight * (y - residual) * (1.0 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): return np.nan_to_num( np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis]))) def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class ExponentialLoss(ClassificationLossFunction): """Exponential loss function for binary classification. Same loss as AdaBoost. References ---------- Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007 """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(ExponentialLoss, self).__init__(1) def init_estimator(self): return ScaledLogOddsEstimator() def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() if sample_weight is None: return np.mean(np.exp(-(2. * y - 1.) * pred)) else: return (1.0 / sample_weight.sum() * np.sum(sample_weight * np.exp(-(2 * y - 1) * pred))) def negative_gradient(self, y, pred, **kargs): y_ = -(2. * y - 1.) return y_ * np.exp(y_ * pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] pred = pred.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) y_ = 2. * y - 1. numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred)) denominator = np.sum(sample_weight * np.exp(-y_ * pred)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = expit(2.0 * score.ravel()) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): return (score.ravel() >= 0.0).astype(np.int) LOSS_FUNCTIONS = {'ls': LeastSquaresError, 'lad': LeastAbsoluteError, 'huber': HuberLossFunction, 'quantile': QuantileLossFunction, 'deviance': None, # for both, multinomial and binomial 'exponential': ExponentialLoss, } INIT_ESTIMATORS = {'zero': ZeroEstimator} class VerboseReporter(object): """Reports verbose output to stdout. If ``verbose==1`` output is printed once in a while (when iteration mod verbose_mod is zero).; if larger than 1 then output is printed for each update. """ def __init__(self, verbose): self.verbose = verbose def init(self, est, begin_at_stage=0): # header fields and line format str header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}'] # do oob? 
if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr:>16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time:>16s}') # print the header line print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) # plot verbose info each time i % verbose_mod == 0 self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage def update(self, j, est): """Update reporter with new iteration. """ do_oob = est.subsample < 1 # we need to take into account if we fit additional estimators. i = j - self.begin_at_stage # iteration relative to the start iter if (i + 1) % self.verbose_mod == 0: oob_impr = est.oob_improvement_[j] if do_oob else 0 remaining_time = ((est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)) if remaining_time > 60: remaining_time = '{0:.2f}m'.format(remaining_time / 60.0) else: remaining_time = '{0:.2f}s'.format(remaining_time) print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time)) if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): # adjust verbose frequency (powers of 10) self.verbose_mod *= 10 class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble, _LearntSelectorMixin)): """Abstract base class for Gradient Boosting. """ @abstractmethod def __init__(self, loss, learning_rate, n_estimators, criterion, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, min_impurity_split, init, subsample, max_features, random_state, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): self.n_estimators = n_estimators self.learning_rate = learning_rate self.loss = loss self.criterion = criterion self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.subsample = subsample self.max_features = max_features self.max_depth = max_depth self.min_impurity_split = min_impurity_split self.init = init self.random_state = random_state self.alpha = alpha self.verbose = verbose self.max_leaf_nodes = max_leaf_nodes self.warm_start = warm_start self.presort = presort self.estimators_ = np.empty((0, 0), dtype=np.object) def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc=None, X_csr=None): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == np.bool loss = self.loss_ original_y = y for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, presort=self.presort) if self.subsample < 1.0: # no inplace multiplication! 
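                # multiply into a fresh array (out-of-bag rows get weight 0)
                # so the caller's sample_weight stays intact for the
                # remaining classes and boosting stages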
sample_weight = sample_weight * sample_mask.astype(np.float64) if X_csc is not None: tree.fit(X_csc, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) else: tree.fit(X, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) # update tree leaves if X_csr is not None: loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) else: loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) # add tree to ensemble self.estimators_[i, k] = tree return y_pred def _check_params(self): """Check validity of parameters and raise ValueError if not valid. """ if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than 0 but " "was %r" % self.n_estimators) if self.learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0 but " "was %r" % self.learning_rate) if (self.loss not in self._SUPPORTED_LOSS or self.loss not in LOSS_FUNCTIONS): raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) if self.loss == 'deviance': loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance) else: loss_class = LOSS_FUNCTIONS[self.loss] if self.loss in ('huber', 'quantile'): self.loss_ = loss_class(self.n_classes_, self.alpha) else: self.loss_ = loss_class(self.n_classes_) if not (0.0 < self.subsample <= 1.0): raise ValueError("subsample must be in (0,1] but " "was %r" % self.subsample) if self.init is not None: if isinstance(self.init, six.string_types): if self.init not in INIT_ESTIMATORS: raise ValueError('init="%s" is not supported' % self.init) else: if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')): raise ValueError("init=%r must be valid BaseEstimator " "and support both fit and " "predict" % self.init) if not (0.0 < self.alpha < 1.0): raise ValueError("alpha must be in (0.0, 1.0) but " "was %r" % self.alpha) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": # if is_classification if self.n_classes_ > 1: max_features = max(1, int(np.sqrt(self.n_features))) else: # is regression max_features = self.n_features elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features))) else: raise ValueError("Invalid value for max_features: %r. " "Allowed string values are 'auto', 'sqrt' " "or 'log2'." % self.max_features) elif self.max_features is None: max_features = self.n_features elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float if 0. < self.max_features <= 1.: max_features = max(int(self.max_features * self.n_features), 1) else: raise ValueError("max_features must be in (0, n_features]") self.max_features_ = max_features def _init_state(self): """Initialize model state and allocate model state data structures. """ if self.init is None: self.init_ = self.loss_.init_estimator() elif isinstance(self.init, six.string_types): self.init_ = INIT_ESTIMATORS[self.init]() else: self.init_ = self.init self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object) self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) # do oob? if self.subsample < 1.0: self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) def _clear_state(self): """Clear the state of the gradient boosting model. 
""" if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=np.object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ def _resize_state(self): """Add additional ``n_estimators`` entries to all attributes. """ # self.n_estimators is the number of additional est to fit total_n_estimators = self.n_estimators if total_n_estimators < self.estimators_.shape[0]: raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0])) self.estimators_.resize((total_n_estimators, self.loss_.K)) self.train_score_.resize(total_n_estimators) if (self.subsample < 1 or hasattr(self, 'oob_improvement_')): # if do oob resize arrays or create new if not available if hasattr(self, 'oob_improvement_'): self.oob_improvement_.resize(total_n_estimators) else: self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64) def _is_initialized(self): return len(getattr(self, 'estimators_', [])) > 0 def _check_initialized(self): """Check that the estimator is initialized, raising an error if not.""" if self.estimators_ is None or len(self.estimators_) == 0: raise NotFittedError("Estimator not fitted, call `fit`" " before making predictions`.") def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. 
""" # if not warmstart - clear the estimator state if not self.warm_start: self._clear_state() # Check input X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) n_samples, self.n_features = X.shape if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) check_consistent_length(X, y, sample_weight) y = self._validate_y(y) random_state = check_random_state(self.random_state) self._check_params() if not self._is_initialized(): # init state self._init_state() # fit initial model - FIXME make sample_weight optional self.init_.fit(X, y, sample_weight) # init predictions y_pred = self.init_.predict(X) begin_at_stage = 0 else: # add more estimators to fitted model # invariant: warm_start = True if self.n_estimators < self.estimators_.shape[0]: raise ValueError('n_estimators=%d must be larger or equal to ' 'estimators_.shape[0]=%d when ' 'warm_start==True' % (self.n_estimators, self.estimators_.shape[0])) begin_at_stage = self.estimators_.shape[0] y_pred = self._decision_function(X) self._resize_state() X_idx_sorted = None presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. if presort == 'auto' and issparse(X): presort = False elif presort == 'auto': presort = True if presort == True: if issparse(X): raise ValueError("Presorting is not supported for sparse matrices.") else: X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32) # fit the boosting stages n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor, X_idx_sorted) # change shape of arrays after fit (early-stopping or additional ests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] return self def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None, X_idx_sorted=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples, ), dtype=np.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ # Set min_weight_leaf from min_weight_fraction_leaf if self.min_weight_fraction_leaf != 0. and sample_weight is not None: min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight)) else: min_weight_leaf = 0. 
if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage old_oob_score = loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = ( old_oob_score - loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break return i + 1 def _make_estimator(self, append=True): # we don't need _make_estimator raise NotImplementedError() def _init_decision_function(self, X): """Check input and compute prediction of ``init``. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) if X.shape[1] != self.n_features: raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format( self.n_features, X.shape[1])) score = self.init_.predict(X).astype(np.float64) return score def _decision_function(self, X): # for use in inner loop, not raveling the output in single-class case, # not doing input validation. score = self._init_decision_function(X) predict_stages(self.estimators_, X, self.learning_rate, score) return score @deprecated(" and will be removed in 0.19") def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification produce an array of shape [n_samples]. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) score = self._decision_function(X) if score.shape[1] == 1: return score.ravel() return score def _staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. 
""" X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') score = self._init_decision_function(X) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X, self.learning_rate, score) yield score.copy() @deprecated(" and will be removed in 0.19") def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ for dec in self._staged_decision_function(X): # no yield from in Python2.X yield dec @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ self._check_initialized() total_sum = np.zeros((self.n_features, ), dtype=np.float64) for stage in self.estimators_: stage_sum = sum(tree.feature_importances_ for tree in stage) / len(stage) total_sum += stage_sum importances = total_sum / len(self.estimators_) return importances def _validate_y(self, y): self.n_classes_ = 1 if y.dtype.kind == 'O': y = y.astype(np.float64) # Default implementation return y def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array_like, shape = [n_samples, n_estimators, n_classes] For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. In the case of binary classification n_classes is 1. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) # n_classes will be equal to 1 in the binary classification or the # regression case. n_estimators, n_classes = self.estimators_.shape leaves = np.zeros((X.shape[0], n_estimators, n_classes)) for i in range(n_estimators): for j in range(n_classes): estimator = self.estimators_[i, j] leaves[:, i, j] = estimator.apply(X, check_input=False) return leaves class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin): """Gradient Boosting for classification. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage ``n_classes_`` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function. Binary classification is a special case where only a single regression tree is induced. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'deviance', 'exponential'}, optional (default='deviance') loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm. learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. 
There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. criterion : string, optional (default="friedman_mse") The function to measure the quality of a split. Supported criteria are "friedman_mse" for the mean squared error with improvement score by Friedman, "mse" for mean squared error, and "mae" for the mean absolute error. The default value of "friedman_mse" is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, optional (default=1e-7) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. 
versionadded:: 0.18 init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. presort : bool or 'auto', optional (default='auto') Whether to presort the data to speed up the finding of best splits in fitting. Auto mode by default will use presorting on dense data and default to normal sorting on sparse data. Setting presort to true on sparse data will raise an error. .. versionadded:: 0.17 *presort* parameter. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. init : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``] The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary classification, otherwise n_classes. See also -------- sklearn.tree.DecisionTreeClassifier, RandomForestClassifier AdaBoostClassifier References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. 
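    Examples
    --------
    A minimal sketch on a tiny synthetic problem; the data below is an
    arbitrary illustration::

        import numpy as np
        from sklearn.ensemble import GradientBoostingClassifier

        X = np.arange(20, dtype=np.float64).reshape(10, 2)
        y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

        clf = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X, y)
        proba = clf.predict_proba(X)   # shape (10, 2), columns follow ``classes_``
        labels = clf.predict(X)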
""" _SUPPORTED_LOSS = ('deviance', 'exponential') def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_split=1e-7, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): super(GradientBoostingClassifier, self).__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, random_state=random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes, min_impurity_split=min_impurity_split, warm_start=warm_start, presort=presort) def _validate_y(self, y): check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) self.n_classes_ = len(self.classes_) return y def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification produce an array of shape [n_samples]. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') score = self._decision_function(X) if score.shape[1] == 1: return score.ravel() return score def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ for dec in self._staged_decision_function(X): # no yield from in Python2.X yield dec def predict(self, X): """Predict class for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y: array of shape = ["n_samples] The predicted values. """ score = self.decision_function(X) decisions = self.loss_._score_to_decision(score) return self.classes_.take(decisions, axis=0) def staged_predict(self, X): """Predict class at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. 
""" for score in self._staged_decision_function(X): decisions = self.loss_._score_to_decision(score) yield self.classes_.take(decisions, axis=0) def predict_proba(self, X): """Predict class probabilities for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ score = self.decision_function(X) try: return self.loss_._score_to_proba(score) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : array of shape = [n_samples] The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) return np.log(proba) def staged_predict_proba(self, X): """Predict class probabilities at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. """ try: for score in self._staged_decision_function(X): yield self.loss_._score_to_proba(score) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin): """Gradient Boosting for regression. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage a regression tree is fit on the negative gradient of the given loss function. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls') loss function to be optimized. 'ls' refers to least squares regression. 'lad' (least absolute deviation) is a highly robust loss function solely based on order information of the input variables. 'huber' is a combination of the two. 'quantile' allows quantile regression (use `alpha` to specify the quantile). learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. 
Tune this parameter for best performance; the best value depends on the interaction of the input variables. criterion : string, optional (default="friedman_mse") The function to measure the quality of a split. Supported criteria are "friedman_mse" for the mean squared error with improvement score by Friedman, "mse" for mean squared error, and "mae" for the mean absolute error. The default value of "friedman_mse" is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. min_impurity_split : float, optional (default=1e-7) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. versionadded:: 0.18 alpha : float (default=0.9) The alpha-quantile of the huber loss function and the quantile loss function. Only if ``loss='huber'`` or ``loss='quantile'``. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. 
If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. presort : bool or 'auto', optional (default='auto') Whether to presort the data to speed up the finding of best splits in fitting. Auto mode by default will use presorting on dense data and default to normal sorting on sparse data. Setting presort to true on sparse data will raise an error. .. versionadded:: 0.17 optional parameter *presort*. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. `init` : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1] The collection of fitted sub-estimators. See also -------- DecisionTreeRegressor, RandomForestRegressor References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile') def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_split=1e-7, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): super(GradientBoostingRegressor, self).__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, min_impurity_split=min_impurity_split, random_state=random_state, alpha=alpha, verbose=verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start, presort=presort) def predict(self, X): """Predict regression target for X. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array of shape = [n_samples] The predicted values. 
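        A minimal sketch; it assumes a fitted regressor ``est`` and a 2-D
        feature array ``X_new``::

            y_new = est.predict(X_new)    # shape (n_samples,)
            # the prediction is the ``init_`` baseline plus the
            # learning_rate-scaled contributions of all fitted trees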
""" X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') return self._decision_function(X).ravel() def staged_predict(self, X): """Predict regression target at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of array of shape = [n_samples] The predicted value of the input samples. """ for y in self._staged_decision_function(X): yield y.ravel() def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array_like, shape = [n_samples, n_estimators] For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. """ leaves = super(GradientBoostingRegressor, self).apply(X) leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0]) return leaves
bsd-3-clause
nilbody/h2o-3
h2o-py/h2o/grid/grid_search.py
1
27931
""" This module implements grid search class. All grid search things inherit from this class. """ from __future__ import print_function from __future__ import absolute_import from builtins import zip from builtins import str from builtins import range from builtins import object from .. import H2OConnection, H2OJob, H2OFrame from ..estimators import H2OEstimator from ..two_dim_table import H2OTwoDimTable from ..display import H2ODisplay import h2o from .metrics import * import itertools class H2OGridSearch(object): # for use with sort_by(): ASC=True DESC=False def __init__(self, model, hyper_params, grid_id=None): """ Grid Search of a Hyper-Parameter Space for a Model Parameters ---------- model : H2OEstimator The type of model to be explored initialized with optional parameters that will be unchanged across explored models. hyper_params: dict A dictionary of string parameters (keys) and a list of values to be explored by grid search (values). grid_id : str, optional The unique id assigned to the resulting grid object. If none is given, an id will automatically be generated. Returns ------- A new H2OGridSearch instance. Examples -------- >>> from h2o.grid.grid_search import H2OGridSearch >>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator >>> hyper_parameters = {'alpha': [0.01,0.5], 'lambda': [1e-5,1e-6]} >>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters) >>> training_data = h2o.import_file("smalldata/logreg/benign.csv") >>> gs.train(x=range(3) + range(4,11),y=3, training_frame=training_data) >>> gs.show() """ self._id = grid_id self.model = model() if model.__class__.__name__ == 'type' else model # H2O Estimator child class self.hyper_params = dict(hyper_params) self._grid_json = None self.models = None # list of H2O Estimator instances self._parms = {} # internal, for object recycle # self.parms = {} # external# self._estimator_type = None# self._future = False # used by __repr__/show to query job state# self._job = None # used when _future is True# @property def grid_id(self): """ Returns ------- A key that identifies this grid search object in H2O. """ return self._id @grid_id.setter def grid_id(self, value): oldname = self.grid_id self._id = value h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, value)) @property def model_ids(self): return [i['name'] for i in self._grid_json["model_ids"]] @property def hyper_names(self): return self._grid_json["hyper_names"] @property def failed_params(self): return self._grid_json["failed_params"] if self._grid_json["failed_params"] else None @property def failure_details(self): return self._grid_json['failure_details'] if self._grid_json['failure_details'] else None @property def failure_stack_traces(self): return self._grid_json['failure_stack_traces'] if self._grid_json['failure_stack_traces'] else None @property def failed_raw_params(self): return self._grid_json['failed_raw_params'] if self._grid_json['failed_raw_params'] else None def start(self,x,y=None,training_frame=None,offset_column=None,fold_column=None,weights_column=None,validation_frame=None,**params): """Asynchronous model build by specifying the predictor columns, response column, and any additional frame-specific values. To block for results, call join. Parameters ---------- x : list A list of column names or indices indicating the predictor columns. y : str An index or a column name indicating the response column. 
training_frame : H2OFrame The H2OFrame having the columns indicated by x and y (as well as any additional columns specified by fold, offset, and weights). offset_column : str, optional The name or index of the column in training_frame that holds the offsets. fold_column : str, optional The name or index of the column in training_frame that holds the per-row fold assignments. weights_column : str, optional The name or index of the column in training_frame that holds the per-row weights. validation_frame : H2OFrame, optional H2OFrame with validation data to be scored on while training. """ self._future=True self.train(x=x, y=y, training_frame=training_frame, offset_column=offset_column, fold_column=fold_column, weights_column=weights_column, validation_frame=validation_frame, **params) def join(self): self._future=False self._job.poll() self._job=None def train(self,x,y=None,training_frame=None,offset_column=None,fold_column=None,weights_column=None,validation_frame=None,**params): #same api as estimator_base train algo_params = locals() parms = self._parms.copy() parms.update({k:v for k, v in algo_params.items() if k not in ["self","params", "algo_params", "parms"] }) parms["hyper_parameters"] = self.hyper_params # unique to grid search parms.update({k:v for k,v in list(self.model._parms.items()) if v is not None}) # unique to grid search if '__class__' in parms: # FIXME: hackt for PY3 del parms['__class__'] y = algo_params["y"] tframe = algo_params["training_frame"] if tframe is None: raise ValueError("Missing training_frame") if y is not None: if isinstance(y, (list, tuple)): if len(y) == 1: parms["y"] = y[0] else: raise ValueError('y must be a single column reference') self._estimator_type = "classifier" if tframe[y].isfactor() else "regressor" self.build_model(parms) def build_model(self, algo_params): if algo_params["training_frame"] is None: raise ValueError("Missing training_frame") x = algo_params.pop("x") y = algo_params.pop("y",None) training_frame = algo_params.pop("training_frame") validation_frame = algo_params.pop("validation_frame",None) is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"]) algo = self.model._compute_algo() #unique to grid search is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm" if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.") if not is_unsupervised and y is None: raise ValueError("Missing response") self._model_build(x, y, training_frame, validation_frame, algo_params) def _model_build(self, x, y, tframe, vframe, kwargs): kwargs['training_frame'] = tframe if vframe is not None: kwargs["validation_frame"] = vframe if isinstance(y, int): y = tframe.names[y] if y is not None: kwargs['response_column'] = y if not isinstance(x, (list,tuple)): x=[x] if isinstance(x[0], int): x = [tframe.names[i] for i in x] offset = kwargs["offset_column"] folds = kwargs["fold_column"] weights= kwargs["weights_column"] ignored_columns = list(set(tframe.names) - set(x + [y,offset,folds,weights])) kwargs["ignored_columns"] = None if ignored_columns==[] else [h2o.h2o._quoted(col) for col in ignored_columns] kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if kwargs[k] is not None]) # gruesome one-liner algo = self.model._compute_algo() #unique to grid search kwargs["_rest_version"] = 99 #unique to grid search grid = H2OJob(H2OConnection.post_json("Grid/"+algo, 
**kwargs), job_type=(algo+" Grid Build")) if self._future: self._job = grid return grid.poll() if '_rest_version' in list(kwargs.keys()): grid_json = H2OConnection.get_json("Grids/"+grid.dest_key, _rest_version=kwargs['_rest_version']) else: grid_json = H2OConnection.get_json("Grids/"+grid.dest_key) self.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] #get first model returned in list of models from grid search to get model class (binomial, multinomial, etc) first_model_json = H2OConnection.get_json("Models/"+grid_json['model_ids'][0]['name'], _rest_version=kwargs['_rest_version'])['models'][0] self._resolve_grid(grid.dest_key, grid_json, first_model_json) def _resolve_grid(self, grid_id, grid_json, first_model_json): model_class = H2OGridSearch._metrics_class(first_model_json) m = model_class() m._id = grid_id m._grid_json = grid_json # m._metrics_class = metrics_class m._parms = self._parms H2OEstimator.mixin(self,model_class) self.__dict__.update(m.__dict__.copy()) def __getitem__(self, item): return self.models[item] def __iter__(self): nmodels = len(self.models) return (self[i] for i in range(nmodels)) def __len__(self): return len(self.models) def __repr__(self): self.show() return "" def predict(self, test_data): """Predict on a dataset. Parameters ---------- test_data : H2OFrame Data to be predicted on. Returns ------- H2OFrame filled with predictions. """ return {model.model_id:model.predict(test_data) for model in self.models} def is_cross_validated(self): """ Returns ------- True if the model was cross-validated. """ return {model.model_id:model.is_cross_validated() for model in self.models} def xval_keys(self): """ Returns ------- The model keys for the cross-validated model. """ return {model.model_id: model.xval_keys() for model in self.models} def get_xval_models(self,key=None): """Return a Model object. Parameters ---------- key : str If None, return all cross-validated models; otherwise return the model that key points. Returns ------- A model or list of models. """ return {model.model_id: model.get_xval_models(key) for model in self.models} def xvals(self): """ Returns ------- A list of cross-validated models. """ return {model.model_id:model.xvals for model in self.models} def deepfeatures(self, test_data, layer): """Obtain a hidden layer's details on a dataset. Parameters ---------- test_data: H2OFrame Data to create a feature space on layer: int index of the hidden layer Returns ------- A dictionary of hidden layer details for each model. """ return {model.model_id:model.deepfeatures(test_data, layer) for model in self.models} def weights(self, matrix_id=0): """ Return the frame for the respective weight matrix :param: matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return. :return: an H2OFrame which represents the weight matrix identified by matrix_id """ return {model.model_id:model.weights(matrix_id) for model in self.models} def biases(self, vector_id=0): """ Return the frame for the respective bias vector :param: vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return. 
:return: an H2OFrame which represents the bias vector identified by vector_id """ return {model.model_id:model.biases(vector_id) for model in self.models} def normmul(self): """ Normalization/Standardization multipliers for numeric predictors """ return {model.model_id:model.normmul() for model in self.models} def normsub(self): """ Normalization/Standardization offsets for numeric predictors """ return {model.model_id:model.normsub() for model in self.models} def respmul(self): """ Normalization/Standardization multipliers for numeric response """ return {model.model_id:model.respmul() for model in self.models} def respsub(self): """ Normalization/Standardization offsets for numeric response """ return {model.model_id:model.respsub() for model in self.models} def catoffsets(self): """ Categorical offsets for one-hot encoding """ return {model.model_id:model.catoffsets() for model in self.models} def model_performance(self, test_data=None, train=False, valid=False): """Generate model metrics for this model on test_data. :param test_data: Data set for which model metrics shall be computed against. Both train and valid arguments are ignored if test_data is not None. :param train: Report the training metrics for the model. If the test_data is the training data, the training metrics are returned. :param valid: Report the validation metrics for the model. If train and valid are True, then it defaults to True. :return: An object of class H2OModelMetrics. """ return {model.model_id:model.model_performance(test_data, train, valid) for model in self.models} def score_history(self): """Retrieve Model Score History Returns ------- Score history (H2OTwoDimTable) """ return {model.model_id:model.score_history() for model in self.models} def summary(self, header=True): """Print a detailed summary of the explored models.""" table = [] for model in self.models: model_summary = model._model_json["output"]["model_summary"] r_values = list(model_summary.cell_values[0]) r_values[0] = model.model_id table.append(r_values) # if h2o.can_use_pandas(): # import pandas # pandas.options.display.max_rows = 20 # print pandas.DataFrame(table,columns=self.col_header) # return print() if header: print('Grid Summary:') print() H2ODisplay(table, ['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left") def show(self): """Print innards of grid, without regard to type""" hyper_combos = itertools.product(*list(self.hyper_params.values())) if not self.models: c_values = [[idx+1, list(val)] for idx, val in enumerate(hyper_combos)] print(H2OTwoDimTable(col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys()))+']'], table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values)) else: if self.failed_raw_params: print('Failed Hyperparameters and Message:') for i in range(len(self.failed_raw_params)): print([str(fi) for fi in self.failed_raw_params[i]], '-->', self.failure_details[i]) print(self.sort_by('mse')) def varimp(self, use_pandas=False): """Pretty print the variable importances, or return them in a list/pandas DataFrame Parameters ---------- use_pandas: boolean, optional If True, then the variable importances will be returned as a pandas data frame. Returns ------- A dictionary of lists or Pandas DataFrame instances. """ return {model.model_id:model.varimp(use_pandas) for model in self.models} def residual_deviance(self,train=False,valid=False,xval=False): """Retreive the residual deviance if this model has the attribute, or None otherwise. 
Parameters ---------- train : boolean, optional, default=True Get the residual deviance for the training set. If both train and valid are False, then train is selected by default. valid: boolean, optional Get the residual deviance for the validation set. If both train and valid are True, then train is selected by default. xval : boolean, optional Get the residual deviance for the cross-validated models. Returns ------- Return the residual deviance, or None if it is not present. """ return {model.model_id:model.residual_deviance(train, valid, xval) for model in self.models} def residual_degrees_of_freedom(self,train=False,valid=False,xval=False): """ Retreive the residual degress of freedom if this model has the attribute, or None otherwise. :param train: Get the residual dof for the training set. If both train and valid are False, then train is selected by default. :param valid: Get the residual dof for the validation set. If both train and valid are True, then train is selected by default. :return: Return the residual dof, or None if it is not present. """ return {model.model_id:model.residual_degrees_of_freedom(train, valid, xval) for model in self.models} def null_deviance(self,train=False,valid=False,xval=False): """ Retreive the null deviance if this model has the attribute, or None otherwise. :param: train Get the null deviance for the training set. If both train and valid are False, then train is selected by default. :param: valid Get the null deviance for the validation set. If both train and valid are True, then train is selected by default. :return: Return the null deviance, or None if it is not present. """ return {model.model_id:model.null_deviance(train, valid, xval) for model in self.models} def null_degrees_of_freedom(self,train=False,valid=False,xval=False): """ Retreive the null degress of freedom if this model has the attribute, or None otherwise. :param train: Get the null dof for the training set. If both train and valid are False, then train is selected by default. :param valid: Get the null dof for the validation set. If both train and valid are True, then train is selected by default. :return: Return the null dof, or None if it is not present. """ return {model.model_id:model.null_degrees_of_freedom(train, valid, xval) for model in self.models} def pprint_coef(self): """ Pretty print the coefficents table (includes normalized coefficients) :return: None """ for i, model in enumerate(self.models): print('Model', i) model.pprint_coef() print() def coef(self): """ :return: Return the coefficients for this model. """ return {model.model_id:model.coef() for model in self.models} def coef_norm(self): """ :return: Return the normalized coefficients """ return {model.model_id:model.coef_norm() for model in self.models} def r2(self, train=False, valid=False, xval=False): """ Return the R^2 for this regression model. The R^2 value is defined to be 1 - MSE/var, where var is computed as sigma*sigma. If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the R^2 value for the training data. :param valid: If valid is True, then return the R^2 value for the validation data. :param xval: If xval is True, then return the R^2 value for the cross validation data. :return: The R^2 for this regression model. 
""" return {model.model_id:model.r2(train, valid, xval) for model in self.models} def mse(self, train=False, valid=False, xval=False): """ Get the MSE(s). If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the MSE value for the training data. :param valid: If valid is True, then return the MSE value for the validation data. :param xval: If xval is True, then return the MSE value for the cross validation data. :return: The MSE for this regression model. """ return {model.model_id:model.mse(train, valid, xval) for model in self.models} def logloss(self, train=False, valid=False, xval=False): """ Get the Log Loss(s). If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the Log Loss value for the training data. :param valid: If valid is True, then return the Log Loss value for the validation data. :param xval: If xval is True, then return the Log Loss value for the cross validation data. :return: The Log Loss for this binomial model. """ return {model.model_id:model.logloss(train, valid, xval) for model in self.models} def mean_residual_deviance(self, train=False, valid=False, xval=False): """ Get the Mean Residual Deviances(s). If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the Mean Residual Deviance value for the training data. :param valid: If valid is True, then return the Mean Residual Deviance value for the validation data. :param xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data. :return: The Mean Residual Deviance for this regression model. """ return {model.model_id:model.mean_residual_deviance(train, valid, xval) for model in self.models} def auc(self, train=False, valid=False, xval=False): """ Get the AUC(s). If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the AUC value for the training data. :param valid: If valid is True, then return the AUC value for the validation data. :param xval: If xval is True, then return the AUC value for the validation data. :return: The AUC. """ return {model.model_id:model.auc(train, valid, xval) for model in self.models} def aic(self, train=False, valid=False, xval=False): """ Get the AIC(s). If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the AIC value for the training data. :param valid: If valid is True, then return the AIC value for the validation data. :param xval: If xval is True, then return the AIC value for the validation data. :return: The AIC. """ return {model.model_id:model.aic(train, valid, xval) for model in self.models} def giniCoef(self, train=False, valid=False, xval=False): """ Get the Gini Coefficient(s). 
If all are False (default), then return the training metric value. If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval" :param train: If train is True, then return the Gini Coefficient value for the training data. :param valid: If valid is True, then return the Gini Coefficient value for the validation data. :param xval: If xval is True, then return the Gini Coefficient value for the cross validation data. :return: The Gini Coefficient for this binomial model. """ return {model.model_id:model.giniCoef(train, valid, xval) for model in self.models} def sort_by(self, metric, increasing=True): """ Sort the models in the grid space by a metric. Parameters ---------- metric: str A metric ('logloss', 'auc', 'r2') by which to sort the models. If addtional arguments are desired, they can be passed to the metric, for example 'logloss(valid=True)' increasing: boolean, optional Sort the metric in increasing (True) (default) or decreasing (False) order. Returns ------- An H2OTwoDimTable of the sorted models showing model id, hyperparameters, and metric value. The best model can be selected and used for prediction. Examples -------- >>> grid_search_results = gs.sort_by('F1', False) >>> best_model_id = grid_search_results['Model Id'][0] >>> best_model = h2o.get_model(best_model_id) >>> best_model.predict(test_data) """ if metric[-1] != ')': metric += '()' c_values = [list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'), key = lambda k_v: k_v[1]))] c_values.insert(1,[self.get_hyperparams(model_id, display=False) for model_id in c_values[0]]) if not increasing: for col in c_values: col.reverse() if metric[-2] == '(': metric = metric[:-2] return H2OTwoDimTable(col_header=['Model Id', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys()))+']', metric], table_header='Grid Search Results for ' + self.model.__class__.__name__, cell_values=list(zip(*c_values))) def get_hyperparams(self, id, display=True): """ Get the hyperparameters of a model explored by grid search. Parameters ---------- id: str The model id of the model with hyperparameters of interest. display: boolean Flag to indicate whether to display the hyperparameter names. Returns ------- A list of the hyperparameters for the specified model. """ idx = id if isinstance(id, int) else self.model_ids.index(id) model = self[idx] res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'],list) else model.params[h]['actual'] for h in self.hyper_params] if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys()))+']') return res @staticmethod def _metrics_class(model_json): model_type = model_json["output"]["model_category"] if model_type=="Binomial": model_class = H2OBinomialGridSearch elif model_type=="Clustering": model_class = H2OClusteringGridSearch elif model_type=="Regression": model_class = H2ORegressionGridSearch elif model_type=="Multinomial": model_class = H2OMultinomialGridSearch elif model_type=="AutoEncoder": model_class = H2OAutoEncoderGridSearch elif model_type=="DimReduction":model_class = H2ODimReductionGridSearch else: raise NotImplementedError(model_type) return model_class @staticmethod def get_grid(model, hyper_params, grid_id, **kwargs): """ Retrieve an H2OGridSearch instance already trained given its original model, hyper_params, and grid_id. Parameters ---------- model : H2O Estimator model The type of model explored that is initalized with optional parameters which are unchanged across explored models. 
hyper_params: dict A dictionary of string parameters (keys) and a list of values explored by grid search (values). grid_id : str, optional The unique id assigned to the grid object. Returns ------- A new H2OGridSearch instance that is a replica of the H2OGridSearch instance with the specified grid_id. """ if kwargs is None: kwargs = {} kwargs['_rest_version'] = 99 grid_json = H2OConnection.get_json("Grids/"+grid_id, **kwargs) grid = H2OGridSearch(model, hyper_params, grid_id) grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] first_model_json = H2OConnection.get_json("Models/"+grid_json['model_ids'][0]['name'], _rest_version=kwargs['_rest_version'])['models'][0] model_class = H2OGridSearch._metrics_class(first_model_json) m = model_class() m._id = grid_id m._grid_json = grid_json # m._metrics_class = metrics_class m._parms = grid._parms H2OEstimator.mixin(grid,model_class) grid.__dict__.update(m.__dict__.copy()) return grid
apache-2.0
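The grid-search class above repeatedly uses the same fan-out pattern: every metric accessor returns a dict keyed by model id, built from self.models, and sort_by then ranks those per-model values. Below is a minimal, self-contained sketch of that pattern using hypothetical ToyModel/ToyGrid classes, not the real H2O objects.

# Sketch of the per-model delegation pattern used by H2OGridSearch above.
# ToyModel, ToyGrid and the metric values are made up for illustration.
class ToyModel:
    def __init__(self, model_id, mse_value):
        self.model_id = model_id
        self._mse = mse_value

    def mse(self):
        return self._mse


class ToyGrid:
    def __init__(self, models):
        self.models = models

    def mse(self):
        # same shape as the grid-level accessors above: one entry per explored model
        return {m.model_id: m.mse() for m in self.models}

    def sort_by_mse(self, increasing=True):
        # analogous to sort_by('mse'): rank model ids by the metric value
        return sorted(self.mse().items(), key=lambda kv: kv[1], reverse=not increasing)


if __name__ == "__main__":
    grid = ToyGrid([ToyModel("m1", 0.42), ToyModel("m2", 0.17), ToyModel("m3", 0.30)])
    print(grid.mse())          # {'m1': 0.42, 'm2': 0.17, 'm3': 0.30}
    print(grid.sort_by_mse())  # best (lowest MSE) first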
yasirkhan380/Tutorials
notebooks/fig_code/figures.py
34
8633
import numpy as np import matplotlib.pyplot as plt import warnings def plot_venn_diagram(): fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[])) ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5)) ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5)) ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black')) ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center') ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center') ax.text(0.0, 0.6, '$I$', size=30) ax.axis('equal') def plot_example_decision_tree(): fig = plt.figure(figsize=(10, 4)) ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[]) ax.set_title('Example Decision Tree: Animal Classification', size=24) def text(ax, x, y, t, size=20, **kwargs): ax.text(x, y, t, ha='center', va='center', size=size, bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs) text(ax, 0.5, 0.9, "How big is\nthe animal?", 20) text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18) text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18) text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14) text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14) text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14) text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14) text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4) text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4) text(ax, 0.21, 0.45, "yes", 12, alpha=0.4) text(ax, 0.34, 0.45, "no", 12, alpha=0.4) text(ax, 0.66, 0.45, "yes", 12, alpha=0.4) text(ax, 0.79, 0.45, "no", 12, alpha=0.4) ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k') ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k') ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k') ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k') ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k') ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k') ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k') ax.axis([0, 1, 0, 1]) def visualize_tree(estimator, X, y, boundaries=True, xlim=None, ylim=None): estimator.fit(X, y) if xlim is None: xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1) if ylim is None: ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1) x_min, x_max = xlim y_min, y_max = ylim xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow') plt.clim(y.min(), y.max()) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow') plt.axis('off') plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.clim(y.min(), y.max()) # Plot the decision boundaries def plot_boundaries(i, xlim, ylim): if i < 0: return tree = estimator.tree_ if tree.feature[i] == 0: plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k') plot_boundaries(tree.children_left[i], [xlim[0], tree.threshold[i]], ylim) plot_boundaries(tree.children_right[i], [tree.threshold[i], xlim[1]], ylim) elif tree.feature[i] == 1: plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k') plot_boundaries(tree.children_left[i], xlim, [ylim[0], tree.threshold[i]]) plot_boundaries(tree.children_right[i], xlim, [tree.threshold[i], ylim[1]]) if boundaries: plot_boundaries(0, plt.xlim(), plt.ylim()) def plot_tree_interactive(X, y): from sklearn.tree import DecisionTreeClassifier def interactive_tree(depth=1): clf = DecisionTreeClassifier(max_depth=depth, random_state=0) visualize_tree(clf, X, y) from IPython.html.widgets import interact 
return interact(interactive_tree, depth=[1, 5]) def plot_kmeans_interactive(min_clusters=1, max_clusters=6): from IPython.html.widgets import interact from sklearn.metrics.pairwise import euclidean_distances from sklearn.datasets.samples_generator import make_blobs with warnings.catch_warnings(): warnings.filterwarnings('ignore') X, y = make_blobs(n_samples=300, centers=4, random_state=0, cluster_std=0.60) def _kmeans_step(frame=0, n_clusters=4): rng = np.random.RandomState(2) labels = np.zeros(X.shape[0]) centers = rng.randn(n_clusters, 2) nsteps = frame // 3 for i in range(nsteps + 1): old_centers = centers if i < nsteps or frame % 3 > 0: dist = euclidean_distances(X, centers) labels = dist.argmin(1) if i < nsteps or frame % 3 > 1: centers = np.array([X[labels == j].mean(0) for j in range(n_clusters)]) nans = np.isnan(centers) centers[nans] = old_centers[nans] # plot the data and cluster centers plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow', vmin=0, vmax=n_clusters - 1); plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o', c=np.arange(n_clusters), s=200, cmap='rainbow') plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o', c='black', s=50) # plot new centers if third frame if frame % 3 == 2: for i in range(n_clusters): plt.annotate('', centers[i], old_centers[i], arrowprops=dict(arrowstyle='->', linewidth=1)) plt.scatter(centers[:, 0], centers[:, 1], marker='o', c=np.arange(n_clusters), s=200, cmap='rainbow') plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='black', s=50) plt.xlim(-4, 4) plt.ylim(-2, 10) if frame % 3 == 1: plt.text(3.8, 9.5, "1. Reassign points to nearest centroid", ha='right', va='top', size=14) elif frame % 3 == 2: plt.text(3.8, 9.5, "2. Update centroids to cluster means", ha='right', va='top', size=14) return interact(_kmeans_step, frame=[0, 50], n_clusters=[min_clusters, max_clusters]) def plot_image_components(x, coefficients=None, mean=0, components=None, imshape=(8, 8), n_components=6, fontsize=12): if coefficients is None: coefficients = x if components is None: components = np.eye(len(coefficients), len(x)) mean = np.zeros_like(x) + mean fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2)) g = plt.GridSpec(2, 5 + n_components, hspace=0.3) def show(i, j, x, title=None): ax = fig.add_subplot(g[i, j], xticks=[], yticks=[]) ax.imshow(x.reshape(imshape), interpolation='nearest') if title: ax.set_title(title, fontsize=fontsize) show(slice(2), slice(2), x, "True") approx = mean.copy() show(0, 2, np.zeros_like(x) + mean, r'$\mu$') show(1, 2, approx, r'$1 \cdot \mu$') for i in range(0, n_components): approx = approx + coefficients[i] * components[i] show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1)) show(1, i + 3, approx, r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1)) plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom', transform=plt.gca().transAxes, fontsize=fontsize) show(slice(2), slice(-2, None), approx, "Approx") def plot_pca_interactive(data, n_components=6): from sklearn.decomposition import PCA from IPython.html.widgets import interact pca = PCA(n_components=n_components) Xproj = pca.fit_transform(data) def show_decomp(i=0): plot_image_components(data[i], Xproj[i], pca.mean_, pca.components_) interact(show_decomp, i=(0, data.shape[0] - 1));
bsd-3-clause
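visualize_tree above colours the plane by evaluating the fitted estimator on a dense mesh grid. The following self-contained sketch shows that technique with scikit-learn's DecisionTreeClassifier; the dataset and tree depth are arbitrary choices made for illustration.

# Mesh-grid decision-boundary plot, as used in visualize_tree above.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier

X, y = make_blobs(n_samples=300, centers=4, cluster_std=1.0, random_state=0)
clf = DecisionTreeClassifier(max_depth=4, random_state=0).fit(X, y)

# dense grid covering the data range
xx, yy = np.meshgrid(np.linspace(X[:, 0].min() - 1, X[:, 0].max() + 1, 200),
                     np.linspace(X[:, 1].min() - 1, X[:, 1].max() + 1, 200))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, Z, alpha=0.3, cmap='rainbow')         # predicted regions
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap='rainbow')   # training points
plt.axis('off')
plt.show()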
AntoineToubhans/MongoTs
setup.py
1
1230
#!/usr/bin/env python
from setuptools import setup, find_packages

requirements = [
    'pandas',
    'parse',
    'pymongo',
]

setup_requirements = [
    'bumpversion',
]

test_requirements = [
    'flake8',
    'mongomock',
    'mypy',
    'pytest',
    'unittest-data-provider',
]

description = 'A fast API for storing and querying time series in MongoDb'

setup(
    name='mongots',
    packages=find_packages(include=['mongots']),
    version='0.2.0',
    description=description,
    long_description=description,
    author='Antoine Toubhans',
    license='MIT',
    author_email='[email protected]',
    url='https://github.com/AntoineToubhans/MongoTs',
    keywords=['mongo', 'pymongo', 'timeserie', ],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=requirements,
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
mit
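One pitfall worth flagging for dependency lists like test_requirements above: Python concatenates adjacent string literals, so a single missing comma silently merges two package names into one bogus requirement. A tiny illustration, with made-up lists:

# Adjacent string literals are joined by the parser, no error is raised.
broken = ['flake8', 'mongomock', 'mypy' 'pytest', 'unittest-data-provider']
fixed = ['flake8', 'mongomock', 'mypy', 'pytest', 'unittest-data-provider']
print(broken)                   # ['flake8', 'mongomock', 'mypypytest', 'unittest-data-provider']
print(len(broken), len(fixed))  # 4 5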
mdeff/ntds_2017
projects/reports/jam/SongJamAnalysis.py
1
9046
#%matplotlib inline #importing all relevant packages import pandas as pd import networkx as nx import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt import seaborn as sns from pygsp import graphs, filters, plotting import pickle import community from scipy import sparse, stats, spatial from SongJamAnalysis import * plt.rcParams['figure.figsize'] = (10, 10) plotting.BACKEND = 'matplotlib' #Setting style for plotting sns.set_style('whitegrid') sns.set_palette('cubehelix',3) ''' This function draws graphs which show the spectral analysis of the signal(spread over tim of jam) on the network. This function is not modular and should only be used in the context it has been used in the notebook. ''' def getInsightSongJam(artist, song, fig, axes, graph, data): if not(nx.is_connected(graph)): #keeping the largest connected component graph=sorted(nx.connected_component_subgraphs(graph), key = len, reverse=True) graph = graph[0] print('\nThe weigted network Gcc has {} nodes.'.format(len(graph.nodes()))) print('The weigted network Gcc has {} edges.'.format(graph.size())) print('The nodes in Gcc has an average degree of {0:.2f}.'.format(sum(list(dict(nx.degree(graph,weight='weight')).values()))/len(graph.nodes()))) #Get relevant data data_grimes = data.loc[(data.artist==artist) & (data.title==song)] #keep only two columns data_grimes = data_grimes[['user_id','creation_date']] #keep only ones in the node list data_grimes = data_grimes[(data_grimes.user_id.isin(graph.nodes()))] #change format to datetime data_grimes['creation_date'] = data_grimes['creation_date'].apply(pd.to_datetime) data_grimes.drop_duplicates(subset='user_id', inplace=True) data_grimes.reset_index(inplace=True) #initialize signal data_grimes['signal'] = 0 data_grimes.loc[0, 'signal'] = 1 #add signal has amount of days from the day of the first jam for i in range(1, data_grimes.shape[0]): a = data_grimes['creation_date'].iloc[i] - data_grimes['creation_date'].iloc[0] b = data_grimes['creation_date'].iloc[0] - data_grimes['creation_date'].iloc[i] data_grimes.loc[i, 'signal'] = a.days*24 + a.seconds/3600 #create graph graph_f = graphs.Graph(nx.adjacency_matrix(graph)) #sort the dataframe to be in the same order as the nodes data_grimes['user_cat'] = pd.Categorical(data_grimes['user_id'],categories=list(graph.nodes()),ordered=True) data_grimes.sort_values('user_cat', inplace=True) #compute fourier graph_f.compute_laplacian('normalized') graph_f.compute_fourier_basis() #plot the signal signal = data_grimes['signal'].values graph_f.set_coordinates(graph_f.U[:, [1, 2]]) graph_f.plot_signal(signal,vertex_size=10, ax=axes[1]) scale = 1 plt.axis([-scale,scale,-scale,scale]) axes[0].plot(np.abs(graph_f.gft(signal))) axes[0].set_xlabel('Laplacian Eigenvalues') axes[0].set_ylabel('Fourier domain response') axes[0].set_title('Implusion signal') # Compute the signal smoothness with gradient smoothness = signal.T @ graph_f.L @ signal / np.linalg.norm(signal)**2 print("Signal gradient: %f" % smoothness) # Compute the suffles signal smoothness with gradient smoothness = 0 for i in range(5): shuffled_signal = data_grimes.sample(frac=1)['signal'].values smoothness += shuffled_signal.T @ graph_f.L @ shuffled_signal / (5 * np.linalg.norm(shuffled_signal)**2) print("Shuffled signal gradient: %f" % smoothness) ''' This function builds the network which contains only the people having jammed the targeted song. This function is not modular and should only be used in the context it has been used in the notebook. 
''' def Create_network_jam(artist, title, net, groups, data, merge): data_network = data.loc[(data.artist==artist) & (data.title==title)] nodes_network = data_network['user_id'] G = nx.Graph() G.add_nodes_from(nodes_network.values) #adding nodes and edges of followers net_jammers = net[(net['follower_user_id'].isin(nodes_network.values)) & (net['followed_user_id'].isin(nodes_network.values))] for i in range(len(net_jammers)): if net_jammers['followed_user_id'].iloc[i] != net['follower_user_id'].iloc[i]: G.add_edge(net_jammers['followed_user_id'].iloc[i],net_jammers['follower_user_id'].iloc[i],weight=1) #adding weights data_network = data.loc[(data.artist==artist) & (data.title==title)] nodes_network = data_network['user_id'] jammers = merge[(merge['user_id_x'].isin(nodes_network.values)) & (merge['user_id_y'].isin(nodes_network.values))] grouped_jammers = jammers.groupby(['user_id_x','user_id_y']).count() for i in range(len(grouped_jammers)): if grouped_jammers.iloc[i].name[0] in G.nodes and grouped_jammers.iloc[i].name[1] in G.neighbors(grouped_jammers.iloc[i].name[0]): G[grouped_jammers.iloc[i].name[0]][grouped_jammers.iloc[i].name[1]]['weight'] += grouped_jammers.iloc[i]['jam_id'] return G ''' This function builds the network which contains only the people having jammed the targeted song. The difference with the function above is that it also takes the neighbors of these people in the graph. This function is not modular and should only be used in the context it has been used in the notebook. ''' def Create_network(artist,title, data, net, merge): data_network = data.loc[(data.artist==artist) & (data.title==title)] nodes_network = data_network['user_id'] G = nx.Graph() G.add_nodes_from(nodes_network.values) #adding nodes and edges of followers net_jammed = net[net['followed_user_id'].isin(nodes_network.values) & \ (net['follower_user_id'] != net['followed_user_id'])] for i in tqdm(range(len(net_jammed))): G.add_node(net['follower_user_id'].iloc[i]) G.add_edge(net['followed_user_id'].iloc[i],net['follower_user_id'].iloc[i],weight=1) #adding weights jammers = merge[merge['user_id_x'].isin(list(G.nodes())) & (merge['user_id_y'].isin(list(G.nodes())))] grouped_jammers = jammers.groupby(['user_id_x','user_id_y']).count() for i in tqdm(range(len(grouped_jammers))): if grouped_jammers.iloc[i].name[1] in G.neighbors(grouped_jammers.iloc[i].name[0]): G[grouped_jammers.iloc[i].name[0]][grouped_jammers.iloc[i].name[1]]['weight'] += \ np.log(grouped_jammers.iloc[i]['jam_id']) return G ''' This function draws graphs which show the spectral analysis of the signal(spread over tim of jam) on the network. This function is not modular and should only be used in the context it has been used in the notebook. 
''' def create_signal(network, artist, title, data): network_nodes = network.nodes() #Get relevant data data_n = data[(data.artist==artist) & (data.title==title)] #keep only two columns data_n = data_n[['user_id','creation_date']] #keep only ones in the node list data_n = data_n[(data_n.user_id.isin(network_nodes))] #initialize signal data_n['signal'] = 0 data_n['signal'].iloc[0] = 1 #change format to datetime data_n['creation_date'] = data_n['creation_date'].apply(pd.to_datetime) #add signal as amount of days from the day of the first jam for i in tqdm(range(1,len(data_n['user_id']))): a = data_n['creation_date'].iloc[i]-data_n['creation_date'].iloc[0] data_n['signal'].iloc[i] = a.days #adding extra data, the nodes that are user that didnt share the song data_n_extra = pd.DataFrame(columns=data_n.columns) data_n_extra['user_id'] = network_nodes #set their signal data_n_extra['signal'] = 2000 #append all data data_n = data_n.append(data_n_extra) #drop duplicates, but since the ones in the signal are at the top they will be kept data_n = data_n.drop_duplicates('user_id',keep='first') #sort the dataframe to be in the same order as the nodes data_n['user_cat'] = pd.Categorical(data_n['user_id'],categories=list(network.nodes()),ordered=True) data_n = data_n.sort_values('user_cat') return data_n ''' This function plots comparison between the music genre jammed by the different communities of a graph. This function is not modular and should only be used in the context it has been used in the notebook. ''' def compareCommunitiesTaste(communities, genres): comms_df = pd.DataFrame.from_dict(communities, 'index') comms_df.columns = ['community'] comms_df.community = comms_df.community.astype(int) comms_df['col'] = 1 genres = genres.merge(comms_df, left_index=True, right_index=True) biggest_comms = genres.groupby('community').sum().sort_values(by='col', ascending=False).iloc[:15] biggest_comms = biggest_comms.divide(biggest_comms.col.values, axis=0) # Taste comparison fig, ax = plt.subplots(figsize=(30,30)) biggest_comms[biggest_comms.columns[:-1]].plot.barh(stacked=True, ax=ax, colormap='Paired')
mit
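getInsightSongJam above scores how well the jam-time signal matches the follower graph with the Laplacian quadratic form s^T L s / ||s||^2: the value is small when neighbouring nodes carry similar signal values. Here is a numpy-only sketch of that measure on a toy 4-node path graph, using the combinatorial Laplacian for simplicity (the notebook code uses the normalized one).

# Graph-signal smoothness on a tiny path graph 0-1-2-3; the graph and the
# two signals are toy inputs chosen only to show the contrast.
import numpy as np

A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
L = np.diag(A.sum(axis=1)) - A  # combinatorial Laplacian D - A


def smoothness(signal, laplacian):
    s = np.asarray(signal, dtype=float)
    return s @ laplacian @ s / np.linalg.norm(s) ** 2


smooth_signal = [1.0, 1.1, 1.2, 1.3]   # varies slowly along the path
rough_signal = [1.0, -1.0, 1.0, -1.0]  # flips sign at every edge
print(smoothness(smooth_signal, L))    # small value
print(smoothness(rough_signal, L))     # much larger value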
sdonapar/CompStats
hypothesis.py
75
10162
"""This file contains code used in "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2010 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division import nsfg import nsfg2 import first import thinkstats2 import thinkplot import copy import random import numpy as np import matplotlib.pyplot as pyplot class CoinTest(thinkstats2.HypothesisTest): """Tests the hypothesis that a coin is fair.""" def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ heads, tails = data test_stat = abs(heads - tails) return test_stat def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ heads, tails = self.data n = heads + tails sample = [random.choice('HT') for _ in range(n)] hist = thinkstats2.Hist(sample) data = hist['H'], hist['T'] return data class DiffMeansPermute(thinkstats2.HypothesisTest): """Tests a difference in means by permutation.""" def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ group1, group2 = data test_stat = abs(group1.mean() - group2.mean()) return test_stat def MakeModel(self): """Build a model of the null hypothesis. """ group1, group2 = self.data self.n, self.m = len(group1), len(group2) self.pool = np.hstack((group1, group2)) def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ np.random.shuffle(self.pool) data = self.pool[:self.n], self.pool[self.n:] return data class DiffMeansOneSided(DiffMeansPermute): """Tests a one-sided difference in means by permutation.""" def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ group1, group2 = data test_stat = group1.mean() - group2.mean() return test_stat class DiffStdPermute(DiffMeansPermute): """Tests a one-sided difference in standard deviation by permutation.""" def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ group1, group2 = data test_stat = group1.std() - group2.std() return test_stat class CorrelationPermute(thinkstats2.HypothesisTest): """Tests correlations by permutation.""" def TestStatistic(self, data): """Computes the test statistic. data: tuple of xs and ys """ xs, ys = data test_stat = abs(thinkstats2.Corr(xs, ys)) return test_stat def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ xs, ys = self.data xs = np.random.permutation(xs) return xs, ys class DiceTest(thinkstats2.HypothesisTest): """Tests whether a six-sided die is fair.""" def TestStatistic(self, data): """Computes the test statistic. data: list of frequencies """ observed = data n = sum(observed) expected = np.ones(6) * n / 6 test_stat = sum(abs(observed - expected)) return test_stat def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ n = sum(self.data) values = [1,2,3,4,5,6] rolls = np.random.choice(values, n, replace=True) hist = thinkstats2.Hist(rolls) freqs = hist.Freqs(values) return freqs class DiceChiTest(DiceTest): """Tests a six-sided die using a chi-squared statistic.""" def TestStatistic(self, data): """Computes the test statistic. 
data: list of frequencies """ observed = data n = sum(observed) expected = np.ones(6) * n / 6 test_stat = sum((observed - expected)**2 / expected) return test_stat class PregLengthTest(thinkstats2.HypothesisTest): """Tests difference in pregnancy length using a chi-squared statistic.""" def TestStatistic(self, data): """Computes the test statistic. data: pair of lists of pregnancy lengths """ firsts, others = data stat = self.ChiSquared(firsts) + self.ChiSquared(others) return stat def ChiSquared(self, lengths): """Computes the chi-squared statistic. lengths: sequence of lengths returns: float """ hist = thinkstats2.Hist(lengths) observed = np.array(hist.Freqs(self.values)) expected = self.expected_probs * len(lengths) stat = sum((observed - expected)**2 / expected) return stat def MakeModel(self): """Build a model of the null hypothesis. """ firsts, others = self.data self.n = len(firsts) self.pool = np.hstack((firsts, others)) pmf = thinkstats2.Pmf(self.pool) self.values = range(35, 44) self.expected_probs = np.array(pmf.Probs(self.values)) def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ np.random.shuffle(self.pool) data = self.pool[:self.n], self.pool[self.n:] return data def RunDiceTest(): """Tests whether a die is fair. """ data = [8, 9, 19, 5, 8, 11] dt = DiceTest(data) print('dice test', dt.PValue(iters=10000)) dt = DiceChiTest(data) print('dice chi test', dt.PValue(iters=10000)) def FalseNegRate(data, num_runs=1000): """Computes the chance of a false negative based on resampling. data: pair of sequences num_runs: how many experiments to simulate returns: float false negative rate """ group1, group2 = data count = 0 for i in range(num_runs): sample1 = thinkstats2.Resample(group1) sample2 = thinkstats2.Resample(group2) ht = DiffMeansPermute((sample1, sample2)) p_value = ht.PValue(iters=101) if p_value > 0.05: count += 1 return count / num_runs def PrintTest(p_value, ht): """Prints results from a hypothesis test. p_value: float ht: HypothesisTest """ print('p-value =', p_value) print('actual =', ht.actual) print('ts max =', ht.MaxTestStat()) def RunTests(data, iters=1000): """Runs several tests on the given data. 
data: pair of sequences iters: number of iterations to run """ # test the difference in means ht = DiffMeansPermute(data) p_value = ht.PValue(iters=iters) print('\nmeans permute two-sided') PrintTest(p_value, ht) ht.PlotCdf() thinkplot.Save(root='hypothesis1', title='Permutation test', xlabel='difference in means (weeks)', ylabel='CDF', legend=False) # test the difference in means one-sided ht = DiffMeansOneSided(data) p_value = ht.PValue(iters=iters) print('\nmeans permute one-sided') PrintTest(p_value, ht) # test the difference in std ht = DiffStdPermute(data) p_value = ht.PValue(iters=iters) print('\nstd permute one-sided') PrintTest(p_value, ht) def ReplicateTests(): """Replicates tests with the new NSFG data.""" live, firsts, others = nsfg2.MakeFrames() # compare pregnancy lengths print('\nprglngth2') data = firsts.prglngth.values, others.prglngth.values ht = DiffMeansPermute(data) p_value = ht.PValue(iters=1000) print('means permute two-sided') PrintTest(p_value, ht) print('\nbirth weight 2') data = (firsts.totalwgt_lb.dropna().values, others.totalwgt_lb.dropna().values) ht = DiffMeansPermute(data) p_value = ht.PValue(iters=1000) print('means permute two-sided') PrintTest(p_value, ht) # test correlation live2 = live.dropna(subset=['agepreg', 'totalwgt_lb']) data = live2.agepreg.values, live2.totalwgt_lb.values ht = CorrelationPermute(data) p_value = ht.PValue() print('\nage weight correlation 2') PrintTest(p_value, ht) # compare pregnancy lengths (chi-squared) data = firsts.prglngth.values, others.prglngth.values ht = PregLengthTest(data) p_value = ht.PValue() print('\npregnancy length chi-squared 2') PrintTest(p_value, ht) def main(): thinkstats2.RandomSeed(17) # run the coin test ct = CoinTest((140, 110)) pvalue = ct.PValue() print('coin test p-value', pvalue) # compare pregnancy lengths print('\nprglngth') live, firsts, others = first.MakeFrames() data = firsts.prglngth.values, others.prglngth.values RunTests(data) # compare birth weights print('\nbirth weight') data = (firsts.totalwgt_lb.dropna().values, others.totalwgt_lb.dropna().values) ht = DiffMeansPermute(data) p_value = ht.PValue(iters=1000) print('means permute two-sided') PrintTest(p_value, ht) # test correlation live2 = live.dropna(subset=['agepreg', 'totalwgt_lb']) data = live2.agepreg.values, live2.totalwgt_lb.values ht = CorrelationPermute(data) p_value = ht.PValue() print('\nage weight correlation') print('n=', len(live2)) PrintTest(p_value, ht) # run the dice test RunDiceTest() # compare pregnancy lengths (chi-squared) data = firsts.prglngth.values, others.prglngth.values ht = PregLengthTest(data) p_value = ht.PValue() print('\npregnancy length chi-squared') PrintTest(p_value, ht) # compute the false negative rate for difference in pregnancy length data = firsts.prglngth.values, others.prglngth.values neg_rate = FalseNegRate(data) print('false neg rate', neg_rate) # run the tests with new nsfg data ReplicateTests() if __name__ == "__main__": main()
mit
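DiffMeansPermute above implements the classic two-sample permutation test: pool both groups, reshuffle the pool, and count how often the shuffled difference in means is at least as extreme as the observed one. A compact numpy-only sketch of the same idea follows; the sample data are arbitrary and only stand in for the pregnancy-length comparison.

# Minimal permutation test for a difference in means.
import numpy as np


def diff_means_permute(group1, group2, iters=1000, rng=None):
    rng = np.random.default_rng(rng)
    group1 = np.asarray(group1, dtype=float)
    group2 = np.asarray(group2, dtype=float)
    actual = abs(group1.mean() - group2.mean())
    pool = np.hstack((group1, group2))
    n = len(group1)
    count = 0
    for _ in range(iters):
        rng.shuffle(pool)
        if abs(pool[:n].mean() - pool[n:].mean()) >= actual:
            count += 1
    return count / iters  # estimated p-value


rng = np.random.default_rng(17)
a = rng.normal(38.6, 2.7, size=400)  # e.g. lengths for first babies
b = rng.normal(38.5, 2.7, size=400)  # e.g. lengths for others
print(diff_means_permute(a, b, iters=1000, rng=0))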
mblondel/scikit-learn
sklearn/linear_model/tests/test_ransac.py
40
12814
import numpy as np from numpy.testing import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal from scipy import sparse from sklearn.utils.testing import assert_less from sklearn.linear_model import LinearRegression, RANSACRegressor from sklearn.linear_model.ransac import _dynamic_max_trials # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), 
np.zeros((100, 1))) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. 
and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y) if __name__ == "__main__": np.testing.run_module_suite()
bsd-3-clause
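The tests above exercise scikit-learn's RANSACRegressor; a minimal usage sketch on the same kind of corrupted-line data follows. Constructor defaults and some keyword names vary a little between scikit-learn versions, so treat this as illustrative rather than exact.

# Fit a line through data with gross outliers and inspect the inlier mask.
import numpy as np
from sklearn.linear_model import RANSACRegressor

X = np.arange(-200, 200)[:, np.newaxis].astype(float)
y = 0.2 * X.ravel() + 20
y[[10, 30, 200]] = [1000, -1000, -50]  # inject a few outliers

ransac = RANSACRegressor(min_samples=2, residual_threshold=5, random_state=0)
ransac.fit(X, y)

print(ransac.estimator_.coef_, ransac.estimator_.intercept_)  # roughly 0.2 and 20
print(np.where(~ransac.inlier_mask_)[0])                      # indices flagged as outliers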
richlewis42/scikit-chem
skchem/test/test_core/test_bond.py
1
3412
#! /usr/bin/env python # # Copyright (C) 2015-2016 Rich Lewis <[email protected]> # License: 3-clause BSD import pytest import numpy as np import pandas as pd from ...core import bond, Mol from . import example_mol #provides 'm' fixture @pytest.fixture(name='b') def example_bond(m): return m.bonds[0] @pytest.fixture(name='bwp') def example_bond_with_props(b): b.props['test'] = 'value' return b def test_len(m): assert len(m.bonds) == 6 def test_out_of_range(m): with pytest.raises(IndexError): m.bonds[100] def test_reverse_index(m): assert m.bonds[-1].order == 1 def test_slice(m): assert len(m.bonds[[1, 4]]) == 2 def test_repr(b): assert repr(b) == '<Bond type="O-C" at {}>'.format(hex(id(b))) def test_owner(m): # rdkit gives a copy of the object, so cant test for identity assert m.bonds[0].owner.to_smiles() == m.to_smiles() def test_index(b): assert b.index == 0 def test_to_dict(b): assert b.to_dict() == {'b': 0, 'e':1, 'o': 1} def test_index(m): assert m.bonds.index.equals(pd.RangeIndex(6, name='bond_idx')) def test_all_params_on_view(): params = list(bond.Bond.__dict__.keys()) for param in ('__doc__', '__repr__', '__str__', '__module__', 'atoms', 'props', 'owner', 'draw', 'to_dict'): params.remove(param) for param in params: assert hasattr(bond.BondView, param) def test_atoms(b): assert len(b.atoms) == 2 def test_atom_idxs(b): assert b.atom_idxs == (0, 1) test_data = [ ('order', [1, 2, 1, 1, 1, 1]), ('stereo_symbol', ['NONE', 'NONE', 'NONE', 'NONE', 'NONE', 'NONE']), ('is_in_ring', [False, False, False, False, False, False]), ('is_conjugated', [True, True, False, False, False, False]), ('is_aromatic', [False, False, False, False, False, False]) ] params = pytest.mark.parametrize('param, expected', test_data) @pytest.fixture(name='m_a') def exampole_aromatic_mol(): return Mol.from_smiles('c1ccNc1') arom_test_data = [ ('order', [1.5, 1.5, 1.5, 1.5, 1.5]), ('stereo_symbol', ['NONE', 'NONE', 'NONE', 'NONE', 'NONE']), ('is_in_ring', [True, True, True, True, True]), ('is_conjugated', [True, True, True, True, True]), ('is_aromatic', [True, True, True, True, True]) ] arom_params = pytest.mark.parametrize('param, expected', arom_test_data) @params def test_params_on_bond_view(m, param, expected): assert np.array_equal(getattr(m.bonds, param), expected) @arom_params def test_arom_params(m_a, param, expected): assert np.array_equal(getattr(m_a.bonds, param), expected) @params def test_params_on_bonds(m, param, expected): res = np.array([getattr(b, param) for b in m.bonds]) assert np.array_equal(res, expected) def test_props_keys_empty(b): assert len(b.props.keys()) == 0 def test_props_len_empty(b): assert len(b.props) == 0 def test_props_keys_full(bwp): assert len(bwp.props.keys()) == 1 assert bwp.props.keys()[0] == 'test' def test_props_len_full(bwp): assert len(bwp.props) == 1 def test_edge_adj(m): assert np.array_equal(m.bonds.adjacency_matrix(), np.array([ [0, 1, 1, 0, 0, 0], [1, 0, 1, 0, 0, 0], [1, 1, 0, 1, 1, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 1, 0, 1], [0, 0, 0, 0, 1, 0]])) def test_atom_idx_view(m): assert m.bonds.atom_idxs.shape == (len(m.bonds), 2)
bsd-3-clause
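The bond tests above lean heavily on two pytest features: named fixtures shared across tests and pytest.mark.parametrize, which expands one test into a case per (param, expected) pair. Below is a toy, scikit-chem-free sketch of that pattern; the Fraction example is purely illustrative.

# Named fixture plus parametrized expectations, mirroring the test layout above.
import pytest
from fractions import Fraction


@pytest.fixture(name='half')
def example_fraction():
    return Fraction(1, 2)


test_data = [
    ('numerator', 1),
    ('denominator', 2),
]


@pytest.mark.parametrize('param, expected', test_data)
def test_params(half, param, expected):
    assert getattr(half, param) == expected


def test_float(half):
    assert float(half) == 0.5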
hanfang/scikit-ribo
setup.py
1
1904
#!/usr/bin/env python import os, sys #from numpy.distutils.core import setup, Extension from setuptools import find_packages from distutils.core import setup if sys.version_info.major != 3: sys.exit("scikit-ribo can only be used with Python 3. You are currently " "running Python %d." % sys.version_info.major) setup( name='scikit_ribo', version='0.2.4b1', description = 'A scikit framework for joint analysis of Riboseq and RNAseq data', long_description=open('pypi_readme.rst').read(), url="https://github.com/hanfang/scikit-ribo", author = 'Han Fang', author_email = '[email protected]', license='GPLv2', scripts=['scikit_ribo/scikit-ribo-run.py','scikit_ribo/scikit-ribo-build.py','scikit_ribo/plot_ribo.py'], packages=find_packages(), install_requires=[ 'colorama>=0.3.7', 'gffutils>=0.8.7.1', 'glmnet_py>=0.1.0b2', 'joblib>=0.10.3', 'matplotlib>=1.5.1', 'numpy>=1.11.2', 'pandas>=0.19.2', 'pybedtools>=0.7.8', 'pyfiglet>=0.7.5', 'pysam>=0.9.1.4', 'scikit-learn>=0.18', 'scipy>=0.18.1', 'seaborn>=0.7.0', 'termcolor>=1.1.0', ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Operating System :: Unix', ], keywords='bioinformatics genomics glm glmnet ridge riboseq', )
gpl-2.0
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/stats/tests/test_weightstats.py
30
21864
'''tests for weightstats, compares with replication no failures but needs cleanup update 2012-09-09: added test after fixing bug in covariance TODOs: - I don't remember what all the commented out code is doing - should be refactored to use generator or inherited tests - still gaps in test coverage - value/diff in ttest_ind is tested in test_tost.py - what about pandas data structures? Author: Josef Perktold License: BSD (3-clause) ''' import numpy as np from scipy import stats from numpy.testing import assert_almost_equal, assert_equal, assert_allclose from statsmodels.stats.weightstats import \ DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint #import statsmodels.stats.weightstats as smws class Holder(object): pass class TestWeightstats(object): def __init__(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1) x2 = m2 + np.random.randn(n2) x1_2d = m1 + np.random.randn(n1, 3) x2_2d = m2 + np.random.randn(n2, 3) w1_ = 2. * np.ones(n1) w2_ = 2. * np.ones(n2) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.x1_2d, self.x2_2d = x1_2d, x2_2d def test_weightstats_1(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 w1_ = 2. * np.ones(len(x1)) w2_ = 2. * np.ones(len(x2)) d1 = DescrStatsW(x1) # print ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal') # #print ttest_ind(x1, x2, usevar='unequal') # print stats.ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal', alternative='larger') # print ttest_ind(x1, x2, usevar='unequal', alternative='smaller') # print ttest_ind(x1, x2, usevar='unequal', weights=(w1_, w2_)) # print stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2]) assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2], stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2])) def test_weightstats_2(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 d1 = DescrStatsW(x1) d1w = DescrStatsW(x1, weights=w1) d2w = DescrStatsW(x2, weights=w2) x1r = d1w.asrepeats() x2r = d2w.asrepeats() # print 'random weights' # print ttest_ind(x1, x2, weights=(w1, w2)) # print stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], stats.ttest_ind(x1r, x2r), 14) #not the same as new version with random weights/replication # assert x1r.shape[0] == d1w.sum_weights # assert x2r.shape[0] == d2w.sum_weights assert_almost_equal(x2r.mean(0), d2w.mean, 14) assert_almost_equal(x2r.var(), d2w.var, 14) assert_almost_equal(x2r.std(), d2w.std, 14) #note: the following is for 1d assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14) #assert_almost_equal(np.corrcoef(np.x2r), d2w.corrcoef, 19) #TODO: exception in corrcoef (scalar case) #one-sample tests # print d1.ttest_mean(3) # print stats.ttest_1samp(x1, 3) # print d1w.ttest_mean(3) # print stats.ttest_1samp(x1r, 3) assert_almost_equal(d1.ttest_mean(3)[:2], stats.ttest_1samp(x1, 3), 11) assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) def test_weightstats_3(self): x1_2d, x2_2d = self.x1_2d, self.x2_2d w1, w2 = self.w1, self.w2 d1w_2d = DescrStatsW(x1_2d, weights=w1) d2w_2d = DescrStatsW(x2_2d, weights=w2) x1r_2d = d1w_2d.asrepeats() x2r_2d = d2w_2d.asrepeats() assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14) assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14) assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14) assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14) assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14) # print d1w_2d.ttest_mean(3) # #scipy.stats.ttest is 
also vectorized # print stats.ttest_1samp(x1r_2d, 3) t,p,d = d1w_2d.ttest_mean(3) assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11) #print [stats.ttest_1samp(xi, 3) for xi in x1r_2d.T] cm = CompareMeans(d1w_2d, d2w_2d) ressm = cm.ttest_ind() resss = stats.ttest_ind(x1r_2d, x2r_2d) assert_almost_equal(ressm[:2], resss, 14) ## #doesn't work for 2d, levene doesn't use weights ## cm = CompareMeans(d1w_2d, d2w_2d) ## ressm = cm.test_equal_var() ## resss = stats.levene(x1r_2d, x2r_2d) ## assert_almost_equal(ressm[:2], resss, 14) def test_weightstats_ddof_tests(self): # explicit test that ttest and confint are independent of ddof # one sample case x1_2d = self.x1_2d w1 = self.w1 d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0) d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1) d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2) #check confint independent of user ddof res0 = d1w_d0.ttest_mean() res1 = d1w_d1.ttest_mean() res2 = d1w_d2.ttest_mean() # concatenate into one array with np.r_ assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], np.r_[res0], 14) res0 = d1w_d0.ttest_mean(0.5) res1 = d1w_d1.ttest_mean(0.5) res2 = d1w_d2.ttest_mean(0.5) assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], np.r_[res0], 14) #check confint independent of user ddof res0 = d1w_d0.tconfint_mean() res1 = d1w_d1.tconfint_mean() res2 = d1w_d2.tconfint_mean() assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) class CheckWeightstats1dMixin(object): def test_basic(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(x1r.mean(0), d1w.mean, 14) assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14) assert_almost_equal(x1r.std(0, ddof=d1w.ddof), d1w.std, 14) var1 = d1w.var_ddof(ddof=1) assert_almost_equal(x1r.var(0, ddof=1), var1, 14) std1 = d1w.std_ddof(ddof=1) assert_almost_equal(x1r.std(0, ddof=1), std1, 14) assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14) # #assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) def test_ttest(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) # def # assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], # stats.ttest_ind(x1r, x2r), 14) def test_ttest_2sample(self): x1, x2 = self.x1, self.x2 x1r, x2r = self.x1r, self.x2r w1, w2 = self.w1, self.w2 #Note: stats.ttest_ind handles 2d/nd arguments res_sp = stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], res_sp, 14) #check correct ttest independent of user ddof cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=0)) cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) res0 = cm0.ttest_ind(usevar='unequal') res1 = cm1.ttest_ind(usevar='unequal') res2 = cm2.ttest_ind(usevar='unequal') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) #check confint independent of user ddof res0 = cm0.tconfint_diff(usevar='pooled') res1 = cm1.tconfint_diff(usevar='pooled') res2 = cm2.tconfint_diff(usevar='pooled') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, 
res0, 14) res0 = cm0.tconfint_diff(usevar='unequal') res1 = cm1.tconfint_diff(usevar='unequal') res2 = cm2.tconfint_diff(usevar='unequal') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) def test_confint_mean(self): #compare confint_mean with ttest d1w = self.d1w alpha = 0.05 low, upp = d1w.tconfint_mean() t, p, d = d1w.ttest_mean(low) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(upp) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(np.vstack((low, upp))) assert_almost_equal(p, alpha * np.ones(p.shape), 8) class CheckWeightstats2dMixin(CheckWeightstats1dMixin): def test_corr(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) class TestWeightstats1d_ddof(CheckWeightstats1dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 1) x2 = m2 + np.random.randn(n2, 1) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=1) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1_ = 2. * np.ones(n1) w2_ = 2. * np.ones(n2) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1) self.d2w = DescrStatsW(x2, weights=w2) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d_ddof(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=1) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d_nobs(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,30 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=0) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() def test_ttest_ind_with_uneq_var(): #from scipy # check vs. 
R a = (1, 2, 3) b = (1.1, 2.9, 4.2) pr = 0.53619490753126731 tr = -0.68649512735572582 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t,p], [tr, pr], 13) a = (1, 2, 3, 4) pr = 0.84354139131608286 tr = -0.2108663315950719 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t,p], [tr, pr], 13) def test_ztest_ztost(): # compare weightstats with separately tested proportion ztest ztost import statsmodels.stats.proportion as smprop x1 = [0, 1] w1 = [5, 15] res2 = smprop.proportions_ztest(15, 20., value=0.5) d1 = DescrStatsW(x1, w1) res1 = d1.ztest_mean(0.5) assert_allclose(res1, res2, rtol=0.03, atol=0.003) d2 = DescrStatsW(x1, np.array(w1)*21./20) res1 = d2.ztest_mean(0.5) assert_almost_equal(res1, res2, decimal=12) res1 = d2.ztost_mean(0.4, 0.6) res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6) assert_almost_equal(res1[0], res2[0], decimal=12) x2 = [0, 1] w2 = [10, 10] #d2 = DescrStatsW(x1, np.array(w1)*21./20) d2 = DescrStatsW(x2, w2) res1 = ztest(d1.asrepeats(), d2.asrepeats()) res2 = smprop.proportions_chisquare(np.asarray([15, 10]), np.asarray([20., 20])) #TODO: check this is this difference expected?, see test_proportion assert_allclose(res1[1], res2[1], rtol=0.03) res1a = CompareMeans(d1, d2).ztest_ind() assert_allclose(res1a[1], res2[1], rtol=0.03) assert_almost_equal(res1a, res1, decimal=12) ###### test for ztest and z confidence interval against R BSDA z.test # Note: I needed to calculate the pooled standard deviation for R # std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2) #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667) #> cat_items(zt, "ztest.") ztest_ = Holder() ztest_.statistic = 6.55109865675183 ztest_.p_value = 5.711530850508982e-11 ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_.estimate = np.array([7.01818181818182, 5.2625]) ztest_.null_value = 0 ztest_.alternative = 'two.sided' ztest_.method = 'Two-sample z-Test' ztest_.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="less") #> cat_items(zt, "ztest_smaller.") ztest_smaller = Holder() ztest_smaller.statistic = 6.55109865675183 ztest_smaller.p_value = 0.999999999971442 ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller.null_value = 0 ztest_smaller.alternative = 'less' ztest_smaller.method = 'Two-sample z-Test' ztest_smaller.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="greater") #> cat_items(zt, "ztest_larger.") ztest_larger = Holder() ztest_larger.statistic = 6.55109865675183 ztest_larger.p_value = 2.855760072861813e-11 ztest_larger.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger.estimate = np.array([7.01818181818182, 5.2625 ]) ztest_larger.null_value = 0 ztest_larger.alternative = 'greater' ztest_larger.method = 'Two-sample z-Test' ztest_larger.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="two.sided") #> cat_items(zt, "ztest_mu.") ztest_mu = Holder() ztest_mu.statistic = 2.81972854805176 ztest_mu.p_value = 0.00480642898427981 ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_mu.null_value = 1 ztest_mu.alternative = 'two.sided' ztest_mu.method = 'Two-sample z-Test' ztest_mu.data_name = 'x and y' #> zt = z.test(x, 
sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="greater") #> cat_items(zt, "ztest_larger_mu.") ztest_larger_mu = Holder() ztest_larger_mu.statistic = 2.81972854805176 ztest_larger_mu.p_value = 0.002403214492139871 ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_larger_mu.null_value = 1 ztest_larger_mu.alternative = 'greater' ztest_larger_mu.method = 'Two-sample z-Test' ztest_larger_mu.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=2, alternative="less") #> cat_items(zt, "ztest_smaller_mu.") ztest_smaller_mu = Holder() ztest_smaller_mu.statistic = -0.911641560648313 ztest_smaller_mu.p_value = 0.1809787183191324 ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller_mu.null_value = 2 ztest_smaller_mu.alternative = 'less' ztest_smaller_mu.method = 'Two-sample z-Test' ztest_smaller_mu.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="two.sided") #> cat_items(zt, "ztest_mu_1s.") ztest_mu_1s = Holder() ztest_mu_1s.statistic = 4.415212090914452 ztest_mu_1s.p_value = 1.009110038015147e-05 ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245]) ztest_mu_1s.estimate = 7.01818181818182 ztest_mu_1s.null_value = 6.4 ztest_mu_1s.alternative = 'two.sided' ztest_mu_1s.method = 'One-sample z-Test' ztest_mu_1s.data_name = 'x' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less") #> cat_items(zt, "ztest_smaller_mu_1s.") ztest_smaller_mu_1s = Holder() ztest_smaller_mu_1s.statistic = -2.727042762035397 ztest_smaller_mu_1s.p_value = 0.00319523783881176 ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716]) ztest_smaller_mu_1s.estimate = 7.01818181818182 ztest_smaller_mu_1s.null_value = 7.4 ztest_smaller_mu_1s.alternative = 'less' ztest_smaller_mu_1s.method = 'One-sample z-Test' ztest_smaller_mu_1s.data_name = 'x' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater") #> cat_items(zt, "ztest_greater_mu_1s.") ztest_larger_mu_1s = Holder() ztest_larger_mu_1s.statistic = 4.415212090914452 ztest_larger_mu_1s.p_value = 5.045550190097003e-06 ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan]) ztest_larger_mu_1s.estimate = 7.01818181818182 ztest_larger_mu_1s.null_value = 6.4 ztest_larger_mu_1s.alternative = 'greater' ztest_larger_mu_1s.method = 'One-sample z-Test' ztest_larger_mu_1s.data_name = 'x' alternatives = {'less' : 'smaller', 'greater' : 'larger', 'two.sided' : 'two-sided'} class TestZTest(object): # all examples use the same data # no weights used in tests @classmethod def setup_class(cls): cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8]) cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5]) cls.d1 = DescrStatsW(cls.x1) cls.d2 = DescrStatsW(cls.x2) cls.cm = CompareMeans(cls.d1, cls.d2) def test(self): x1, x2 = self.x1, self.x2 cm = self.cm # tc : test cases for tc in [ztest_, ztest_smaller, ztest_larger, ztest_mu, ztest_smaller_mu, ztest_larger_mu]: zstat, pval = ztest(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = cm.ztest_ind(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) 
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) #overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, x2, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = cm.zconfint_diff(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = zconfint(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10) # 1 sample test copy-paste d1 = self.d1 for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]: zstat, pval = ztest(x1, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = d1.ztest_mean(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) #overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = d1.zconfint_mean(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10)
apache-2.0
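The weightstats tests above repeatedly check that integer case weights behave like repeating each observation (via asrepeats()). A minimal stand-alone sketch of that equivalence, assuming statsmodels and scipy are available; the variable names are illustrative, not taken from the file:

import numpy as np
from scipy import stats
from statsmodels.stats.weightstats import DescrStatsW

rng = np.random.RandomState(0)
x = rng.randn(20)                      # sample
w = rng.randint(1, 4, size=20)         # integer case weights

d = DescrStatsW(x, weights=w)
xr = d.asrepeats()                     # expand each observation w[i] times

# weighted mean and one-sample t-test should match the expanded sample
np.testing.assert_allclose(d.mean, xr.mean())
t_sp, p_sp = stats.ttest_1samp(xr, 0.5)
np.testing.assert_allclose(d.ttest_mean(0.5)[:2], [t_sp, p_sp])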
Dino-China/OPaparazzi-CN
sw/tools/calibration/calibrate_gyro.py
87
4686
#! /usr/bin/env python

# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#

#
# calibrate gyrometers using turntable measurements
#

from __future__ import print_function, division

from optparse import OptionParser
import os
import sys
from scipy import linspace, polyval, stats
import matplotlib.pyplot as plt

import calibration_utils

#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#


def main():
    usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
    parser = OptionParser(usage)
    parser.add_option("-i", "--id", dest="ac_id",
                      action="store", type=int, default=-1,
                      help="aircraft id to use")
    parser.add_option("-t", "--tt_id", dest="tt_id",
                      action="store", type=int, default=-1,
                      help="turntable id to use")
    parser.add_option("-a", "--axis", dest="axis",
                      type="choice", choices=['p', 'q', 'r'],
                      help="axis to calibrate (p, q, r)",
                      action="store")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")

    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")
    else:
        if os.path.isfile(args[0]):
            filename = args[0]
        else:
            print(args[0] + " not found")
            sys.exit(1)

    if not filename.endswith(".data"):
        parser.error("Please specify a *.data log file")

    if options.ac_id < 0 or options.ac_id > 255:
        parser.error("Specify a valid aircraft id number!")
    if options.tt_id < 0 or options.tt_id > 255:
        parser.error("Specify a valid turntable id number!")
    if options.verbose:
        print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))

    samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)

    if len(samples) == 0:
        print("Error: found zero matching messages in log file!")
        print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
        sys.exit(1)
    if options.verbose:
        print("found "+str(len(samples))+" records")

    if options.axis == 'p':
        axis_idx = 1
    elif options.axis == 'q':
        axis_idx = 2
    elif options.axis == 'r':
        axis_idx = 3
    else:
        parser.error("Specify a valid axis!")

    # Linear regression using stats.linregress
    t = samples[:, 0]
    xn = samples[:, axis_idx]
    (a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
    print('Linear regression using stats.linregress')
    print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
    print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
    print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))

    #
    # overlay fitted value
    #
    ovl_omega = linspace(1, 7.5, 10)
    ovl_adc = polyval([a_s, b_s], ovl_omega)

    plt.title('Linear Regression Example')
    plt.subplot(3, 1, 1)
    plt.plot(samples[:, 1])
    plt.plot(samples[:, 2])
    plt.plot(samples[:, 3])
    plt.legend(['p', 'q', 'r'])
    plt.subplot(3, 1, 2)
    plt.plot(samples[:, 0])
    plt.subplot(3, 1, 3)
    plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
    plt.plot(ovl_omega, ovl_adc, 'r')
    plt.show()


if __name__ == "__main__":
    main()
gpl-2.0
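The calibration script above reduces to a least-squares line fit between turntable rate and raw gyro counts. A small self-contained sketch of that step on synthetic data; the slope, offset, and noise values here are invented for illustration:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# synthetic turntable rates (rad/s) and fake raw gyro ADC readings
omega = np.linspace(1.0, 7.5, 50)
adc = -4500.0 * omega + 32000.0 + rng.normal(0, 5, omega.size)

a_s, b_s, r, p, stderr = stats.linregress(omega, adc)
print('neutral (offset) = %d' % b_s)        # corresponds to GYRO_X_NEUTRAL
print('sens = %f' % (pow(2, 12) / a_s))     # corresponds to GYRO_X_SENS, as in the script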
pkruskal/scikit-learn
sklearn/metrics/tests/test_common.py
83
41144
from __future__ import division, print_function from functools import partial from itertools import product import numpy as np import scipy.sparse as sp from sklearn.datasets import make_multilabel_classification from sklearn.preprocessing import LabelBinarizer from sklearn.utils.multiclass import type_of_target from sklearn.utils.validation import check_random_state from sklearn.utils import shuffle from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import ignore_warnings from sklearn.metrics import accuracy_score from sklearn.metrics import average_precision_score from sklearn.metrics import brier_score_loss from sklearn.metrics import cohen_kappa_score from sklearn.metrics import confusion_matrix from sklearn.metrics import coverage_error from sklearn.metrics import explained_variance_score from sklearn.metrics import f1_score from sklearn.metrics import fbeta_score from sklearn.metrics import hamming_loss from sklearn.metrics import hinge_loss from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import label_ranking_loss from sklearn.metrics import log_loss from sklearn.metrics import matthews_corrcoef from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import median_absolute_error from sklearn.metrics import precision_score from sklearn.metrics import r2_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score from sklearn.metrics import zero_one_loss # TODO Curve are currently not coverd by invariance test # from sklearn.metrics import precision_recall_curve # from sklearn.metrics import roc_curve from sklearn.metrics.base import _average_binary_score # Note toward developers about metric testing # ------------------------------------------- # It is often possible to write one general test for several metrics: # # - invariance properties, e.g. invariance to sample order # - common behavior for an argument, e.g. the "normalize" with value True # will return the mean of the metrics and with value False will return # the sum of the metrics. # # In order to improve the overall metric testing, it is a good idea to write # first a specific test for the given metric and then add a general test for # all metrics that have the same behavior. # # Two types of datastructures are used in order to implement this system: # dictionaries of metrics and lists of metrics wit common properties. # # Dictionaries of metrics # ------------------------ # The goal of having those dictionaries is to have an easy way to call a # particular metric and associate a name to each function: # # - REGRESSION_METRICS: all regression metrics. # - CLASSIFICATION_METRICS: all classification metrics # which compare a ground truth and the estimated targets as returned by a # classifier. # - THRESHOLDED_METRICS: all classification metrics which # compare a ground truth and a score, e.g. estimated probabilities or # decision function (format might vary) # # Those dictionaries will be used to test systematically some invariance # properties, e.g. 
invariance toward several input layout. # REGRESSION_METRICS = { "mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "median_absolute_error": median_absolute_error, "explained_variance_score": explained_variance_score, "r2_score": r2_score, } CLASSIFICATION_METRICS = { "accuracy_score": accuracy_score, "unnormalized_accuracy_score": partial(accuracy_score, normalize=False), "confusion_matrix": confusion_matrix, "hamming_loss": hamming_loss, "jaccard_similarity_score": jaccard_similarity_score, "unnormalized_jaccard_similarity_score": partial(jaccard_similarity_score, normalize=False), "zero_one_loss": zero_one_loss, "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False), # These are needed to test averaging "precision_score": precision_score, "recall_score": recall_score, "f1_score": f1_score, "f2_score": partial(fbeta_score, beta=2), "f0.5_score": partial(fbeta_score, beta=0.5), "matthews_corrcoef_score": matthews_corrcoef, "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5), "weighted_f1_score": partial(f1_score, average="weighted"), "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2), "weighted_precision_score": partial(precision_score, average="weighted"), "weighted_recall_score": partial(recall_score, average="weighted"), "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5), "micro_f1_score": partial(f1_score, average="micro"), "micro_f2_score": partial(fbeta_score, average="micro", beta=2), "micro_precision_score": partial(precision_score, average="micro"), "micro_recall_score": partial(recall_score, average="micro"), "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5), "macro_f1_score": partial(f1_score, average="macro"), "macro_f2_score": partial(fbeta_score, average="macro", beta=2), "macro_precision_score": partial(precision_score, average="macro"), "macro_recall_score": partial(recall_score, average="macro"), "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5), "samples_f1_score": partial(f1_score, average="samples"), "samples_f2_score": partial(fbeta_score, average="samples", beta=2), "samples_precision_score": partial(precision_score, average="samples"), "samples_recall_score": partial(recall_score, average="samples"), "cohen_kappa_score": cohen_kappa_score, } THRESHOLDED_METRICS = { "coverage_error": coverage_error, "label_ranking_loss": label_ranking_loss, "log_loss": log_loss, "unnormalized_log_loss": partial(log_loss, normalize=False), "hinge_loss": hinge_loss, "brier_score_loss": brier_score_loss, "roc_auc_score": roc_auc_score, "weighted_roc_auc": partial(roc_auc_score, average="weighted"), "samples_roc_auc": partial(roc_auc_score, average="samples"), "micro_roc_auc": partial(roc_auc_score, average="micro"), "macro_roc_auc": partial(roc_auc_score, average="macro"), "average_precision_score": average_precision_score, "weighted_average_precision_score": partial(average_precision_score, average="weighted"), "samples_average_precision_score": partial(average_precision_score, average="samples"), "micro_average_precision_score": partial(average_precision_score, average="micro"), "macro_average_precision_score": partial(average_precision_score, average="macro"), "label_ranking_average_precision_score": label_ranking_average_precision_score, } ALL_METRICS = dict() ALL_METRICS.update(THRESHOLDED_METRICS) ALL_METRICS.update(CLASSIFICATION_METRICS) ALL_METRICS.update(REGRESSION_METRICS) # Lists of metrics with common properties # 
--------------------------------------- # Lists of metrics with common properties are used to test systematically some # functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that # are symmetric with respect to their input argument y_true and y_pred. # # When you add a new metric or functionality, check if a general test # is already written. # Metric undefined with "binary" or "multiclass" input METRIC_UNDEFINED_MULTICLASS = [ "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", # Those metrics don't support multiclass outputs "average_precision_score", "weighted_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", "samples_average_precision_score", "label_ranking_average_precision_score", "roc_auc_score", "micro_roc_auc", "weighted_roc_auc", "macro_roc_auc", "samples_roc_auc", "coverage_error", "brier_score_loss", "label_ranking_loss", ] # Metrics with an "average" argument METRICS_WITH_AVERAGING = [ "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score" ] # Treshold-based metrics with an "average" argument THRESHOLDED_METRICS_WITH_AVERAGING = [ "roc_auc_score", "average_precision_score", ] # Metrics with a "pos_label" argument METRICS_WITH_POS_LABEL = [ "roc_curve", "brier_score_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", # pos_label support deprecated; to be removed in 0.18: "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", ] # Metrics with a "labels" argument # TODO: Handle multi_class metrics that has a labels argument as well as a # decision function argument. 
e.g hinge_loss METRICS_WITH_LABELS = [ "confusion_matrix", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "cohen_kappa_score", ] # Metrics with a "normalize" option METRICS_WITH_NORMALIZE_OPTION = [ "accuracy_score", "jaccard_similarity_score", "zero_one_loss", ] # Threshold-based metrics with "multilabel-indicator" format support THRESHOLDED_MULTILABEL_METRICS = [ "log_loss", "unnormalized_log_loss", "roc_auc_score", "weighted_roc_auc", "samples_roc_auc", "micro_roc_auc", "macro_roc_auc", "average_precision_score", "weighted_average_precision_score", "samples_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", "coverage_error", "label_ranking_loss", ] # Classification metrics with "multilabel-indicator" format MULTILABELS_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", ] # Regression metrics with "multioutput-continuous" format support MULTIOUTPUT_METRICS = [ "mean_absolute_error", "mean_squared_error", "r2_score", "explained_variance_score" ] # Symmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) == metric(y_pred, y_true). SYMMETRIC_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score", "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error", "median_absolute_error", "cohen_kappa_score", ] # Asymmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) != metric(y_pred, y_true). 
NOT_SYMMETRIC_METRICS = [ "explained_variance_score", "r2_score", "confusion_matrix", "precision_score", "recall_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "log_loss", "hinge_loss" ] # No Sample weight support METRICS_WITHOUT_SAMPLE_WEIGHT = [ "cohen_kappa_score", "confusion_matrix", "hamming_loss", "matthews_corrcoef_score", "median_absolute_error", ] @ignore_warnings def test_symmetry(): # Test the symmetry of score and loss functions random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(20, )) y_pred = random_state.randint(0, 2, size=(20, )) # We shouldn't forget any metrics assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS, METRIC_UNDEFINED_MULTICLASS), set(ALL_METRICS)) assert_equal( set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)), set([])) # Symmetric metric for name in SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_pred, y_true), err_msg="%s is not symmetric" % name) # Not symmetric metrics for name in NOT_SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)), msg="%s seems to be symmetric" % name) @ignore_warnings def test_sample_order_invariance(): random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(20, )) y_pred = random_state.randint(0, 2, size=(20, )) y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0) for name, metric in ALL_METRICS.items(): if name in METRIC_UNDEFINED_MULTICLASS: continue assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) @ignore_warnings def test_sample_order_invariance_multilabel_and_multioutput(): random_state = check_random_state(0) # Generate some data y_true = random_state.randint(0, 2, size=(20, 25)) y_pred = random_state.randint(0, 2, size=(20, 25)) y_score = random_state.normal(size=y_true.shape) y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true, y_pred, y_score, random_state=0) for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) for name in THRESHOLDED_MULTILABEL_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) @ignore_warnings def test_format_invariance_with_1d_vectors(): random_state = check_random_state(0) y1 = random_state.randint(0, 2, size=(20, )) y2 = random_state.randint(0, 2, size=(20, )) y1_list = list(y1) y2_list = list(y2) y1_1d, y2_1d = np.array(y1), np.array(y2) assert_equal(y1_1d.ndim, 1) assert_equal(y2_1d.ndim, 1) y1_column = np.reshape(y1_1d, (-1, 1)) y2_column = np.reshape(y2_1d, (-1, 1)) y1_row = np.reshape(y1_1d, (1, -1)) y2_row = np.reshape(y2_1d, (1, -1)) 
for name, metric in ALL_METRICS.items(): if name in METRIC_UNDEFINED_MULTICLASS: continue measure = metric(y1, y2) assert_almost_equal(metric(y1_list, y2_list), measure, err_msg="%s is not representation invariant " "with list" % name) assert_almost_equal(metric(y1_1d, y2_1d), measure, err_msg="%s is not representation invariant " "with np-array-1d" % name) assert_almost_equal(metric(y1_column, y2_column), measure, err_msg="%s is not representation invariant " "with np-array-column" % name) # Mix format support assert_almost_equal(metric(y1_1d, y2_list), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_list, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_1d, y2_column), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_list, y2_column), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_list), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) # These mix representations aren't allowed assert_raises(ValueError, metric, y1_1d, y2_row) assert_raises(ValueError, metric, y1_row, y2_1d) assert_raises(ValueError, metric, y1_list, y2_row) assert_raises(ValueError, metric, y1_row, y2_list) assert_raises(ValueError, metric, y1_column, y2_row) assert_raises(ValueError, metric, y1_row, y2_column) # NB: We do not test for y1_row, y2_row as these may be # interpreted as multilabel or multioutput data. 
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS + MULTILABELS_METRICS)): assert_raises(ValueError, metric, y1_row, y2_row) @ignore_warnings def test_invariance_string_vs_numbers_labels(): # Ensure that classification metrics with string labels random_state = check_random_state(0) y1 = random_state.randint(0, 2, size=(20, )) y2 = random_state.randint(0, 2, size=(20, )) y1_str = np.array(["eggs", "spam"])[y1] y2_str = np.array(["eggs", "spam"])[y2] pos_label_str = "spam" labels_str = ["eggs", "spam"] for name, metric in CLASSIFICATION_METRICS.items(): if name in METRIC_UNDEFINED_MULTICLASS: continue measure_with_number = metric(y1, y2) # Ugly, but handle case with a pos_label and label metric_str = metric if name in METRICS_WITH_POS_LABEL: metric_str = partial(metric_str, pos_label=pos_label_str) measure_with_str = metric_str(y1_str, y2_str) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number invariance " "test".format(name)) measure_with_strobj = metric_str(y1_str.astype('O'), y2_str.astype('O')) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string object vs number " "invariance test".format(name)) if name in METRICS_WITH_LABELS: metric_str = partial(metric_str, labels=labels_str) measure_with_str = metric_str(y1_str, y2_str) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number " "invariance test".format(name)) measure_with_strobj = metric_str(y1_str.astype('O'), y2_str.astype('O')) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string vs number " "invariance test".format(name)) for name, metric in THRESHOLDED_METRICS.items(): if name in ("log_loss", "hinge_loss", "unnormalized_log_loss", "brier_score_loss"): # Ugly, but handle case with a pos_label and label metric_str = metric if name in METRICS_WITH_POS_LABEL: metric_str = partial(metric_str, pos_label=pos_label_str) measure_with_number = metric(y1, y2) measure_with_str = metric_str(y1_str, y2) assert_array_equal(measure_with_number, measure_with_str, err_msg="{0} failed string vs number " "invariance test".format(name)) measure_with_strobj = metric(y1_str.astype('O'), y2) assert_array_equal(measure_with_number, measure_with_strobj, err_msg="{0} failed string object vs number " "invariance test".format(name)) else: # TODO those metrics doesn't support string label yet assert_raises(ValueError, metric, y1_str, y2) assert_raises(ValueError, metric, y1_str.astype('O'), y2) @ignore_warnings def check_single_sample(name): # Non-regression test: scores should work with a single sample. # This is important for leave-one-out cross validation. # Score functions tested are those that formerly called np.squeeze, # which turns an array of size 1 into a 0-d array (!). 
metric = ALL_METRICS[name] # assert that no exception is thrown for i, j in product([0, 1], repeat=2): metric([i], [j]) @ignore_warnings def check_single_sample_multioutput(name): metric = ALL_METRICS[name] for i, j, k, l in product([0, 1], repeat=4): metric(np.array([[i, j]]), np.array([[k, l]])) def test_single_sample(): for name in ALL_METRICS: if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS: # Those metrics are not always defined with one sample # or in multiclass classification continue yield check_single_sample, name for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS: yield check_single_sample_multioutput, name def test_multioutput_number_of_output_differ(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0], [1, 0], [0, 0]]) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_raises(ValueError, metric, y_true, y_pred) def test_multioutput_regression_invariance_to_dimension_shuffling(): # test invariance to dimension shuffling random_state = check_random_state(0) y_true = random_state.uniform(0, 2, size=(20, 5)) y_pred = random_state.uniform(0, 2, size=(20, 5)) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] error = metric(y_true, y_pred) for _ in range(3): perm = random_state.permutation(y_true.shape[1]) assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]), error, err_msg="%s is not dimension shuffling " "invariant" % name) @ignore_warnings def test_multilabel_representation_invariance(): # Generate some data n_classes = 4 n_samples = 50 _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=0, n_samples=n_samples, allow_unlabeled=True) _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=1, n_samples=n_samples, allow_unlabeled=True) # To make sure at least one empty label is present y1 += [0]*n_classes y2 += [0]*n_classes y1_sparse_indicator = sp.coo_matrix(y1) y2_sparse_indicator = sp.coo_matrix(y2) for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] # XXX cruel hack to work with partial functions if isinstance(metric, partial): metric.__module__ = 'tmp' metric.__name__ = name measure = metric(y1, y2) # Check representation invariance assert_almost_equal(metric(y1_sparse_indicator, y2_sparse_indicator), measure, err_msg="%s failed representation invariance " "between dense and sparse indicator " "formats." 
% name) def test_raise_value_error_multilabel_sequences(): # make sure the multilabel-sequence format raises ValueError multilabel_sequences = [ [[0, 1]], [[1], [2], [0, 1]], [(), (2), (0, 1)], [[]], [()], np.array([[], [1, 2]], dtype='object')] for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] for seq in multilabel_sequences: assert_raises(ValueError, metric, seq, seq) def test_normalize_option_binary_classification(n_samples=20): # Test in the binary case random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(n_samples, )) y_pred = random_state.randint(0, 2, size=(n_samples, )) for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multiclasss_classification(): # Test in the multiclass case random_state = check_random_state(0) y_true = random_state.randint(0, 4, size=(20, )) y_pred = random_state.randint(0, 4, size=(20, )) n_samples = y_true.shape[0] for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multilabel_classification(): # Test in the multilabel case n_classes = 4 n_samples = 100 # for both random_state 0 and 1, y_true and y_pred has at least one # unlabelled entry _, y_true = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=0, allow_unlabeled=True, n_samples=n_samples) _, y_pred = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=1, allow_unlabeled=True, n_samples=n_samples) # To make sure at least one empty label is present y_true += [0]*n_classes y_pred += [0]*n_classes for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure, err_msg="Failed with %s" % name) @ignore_warnings def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel): n_samples, n_classes = y_true_binarize.shape # No averaging label_measure = metric(y_true, y_pred, average=None) assert_array_almost_equal(label_measure, [metric(y_true_binarize[:, i], y_pred_binarize[:, i]) for i in range(n_classes)]) # Micro measure micro_measure = metric(y_true, y_pred, average="micro") assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel())) # Macro measure macro_measure = metric(y_true, y_pred, average="macro") assert_almost_equal(macro_measure, np.mean(label_measure)) # Weighted measure weights = np.sum(y_true_binarize, axis=0, dtype=int) if np.sum(weights) != 0: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, np.average(label_measure, weights=weights)) else: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, 0) # Sample measure if is_multilabel: sample_measure = metric(y_true, y_pred, average="samples") assert_almost_equal(sample_measure, np.mean([metric(y_true_binarize[i], y_pred_binarize[i]) for i in range(n_samples)])) 
assert_raises(ValueError, metric, y_true, y_pred, average="unknown") assert_raises(ValueError, metric, y_true, y_pred, average="garbage") def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score): is_multilabel = type_of_target(y_true).startswith("multilabel") metric = ALL_METRICS[name] if name in METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel) elif name in THRESHOLDED_METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_score, y_true_binarize, y_score, is_multilabel) else: raise ValueError("Metric is not recorded as having an average option") def test_averaging_multiclass(n_samples=50, n_classes=3): random_state = check_random_state(0) y_true = random_state.randint(0, n_classes, size=(n_samples, )) y_pred = random_state.randint(0, n_classes, size=(n_samples, )) y_score = random_state.uniform(size=(n_samples, n_classes)) lb = LabelBinarizer().fit(y_true) y_true_binarize = lb.transform(y_true) y_pred_binarize = lb.transform(y_pred) for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel(n_classes=5, n_samples=40): _, y = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=5, n_samples=n_samples, allow_unlabeled=False) y_true = y[:20] y_pred = y[20:] y_score = check_random_state(0).normal(size=(20, n_classes)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel_all_zeroes(): y_true = np.zeros((20, 3)) y_pred = np.zeros((20, 3)) y_score = np.zeros((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) # Test _average_binary_score for weight.sum() == 0 binary_metric = (lambda y_true, y_score, average="macro": _average_binary_score( precision_score, y_true, y_score, average)) _check_averaging(binary_metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel=True) def test_averaging_multilabel_all_ones(): y_true = np.ones((20, 3)) y_pred = np.ones((20, 3)) y_score = np.ones((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) @ignore_warnings def check_sample_weight_invariance(name, metric, y1, y2): rng = np.random.RandomState(0) sample_weight = rng.randint(1, 10, size=len(y1)) # check that unit weights gives the same score as no weight unweighted_score = metric(y1, y2, sample_weight=None) assert_almost_equal( unweighted_score, metric(y1, y2, sample_weight=np.ones(shape=len(y1))), err_msg="For %s sample_weight=None is not equivalent to " "sample_weight=ones" % name) # check that the weighted and unweighted scores are unequal weighted_score = metric(y1, y2, sample_weight=sample_weight) assert_not_equal( unweighted_score, weighted_score, msg="Unweighted and weighted scores are unexpectedly " "equal (%f) for %s" % (weighted_score, name)) # check that sample_weight can be a list weighted_score_list = metric(y1, y2, sample_weight=sample_weight.tolist()) assert_almost_equal( weighted_score, weighted_score_list, err_msg="Weighted scores for array and list sample_weight input are " "not equal (%f != %f) for %s" % ( 
weighted_score, weighted_score_list, name)) # check that integer weights is the same as repeated samples repeat_weighted_score = metric( np.repeat(y1, sample_weight, axis=0), np.repeat(y2, sample_weight, axis=0), sample_weight=None) assert_almost_equal( weighted_score, repeat_weighted_score, err_msg="Weighting %s is not equal to repeating samples" % name) # check that ignoring a fraction of the samples is equivalent to setting # the corresponding weights to zero sample_weight_subset = sample_weight[1::2] sample_weight_zeroed = np.copy(sample_weight) sample_weight_zeroed[::2] = 0 y1_subset = y1[1::2] y2_subset = y2[1::2] weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset) weighted_score_zeroed = metric(y1, y2, sample_weight=sample_weight_zeroed) assert_almost_equal( weighted_score_subset, weighted_score_zeroed, err_msg=("Zeroing weights does not give the same result as " "removing the corresponding samples (%f != %f) for %s" % (weighted_score_zeroed, weighted_score_subset, name))) if not name.startswith('unnormalized'): # check that the score is invariant under scaling of the weights by a # common factor for scaling in [2, 0.3]: assert_almost_equal( weighted_score, metric(y1, y2, sample_weight=sample_weight * scaling), err_msg="%s sample_weight is not invariant " "under scaling" % name) # Check that if sample_weight.shape[0] != y_true.shape[0], it raised an # error assert_raises(Exception, metric, y1, y2, sample_weight=np.hstack([sample_weight, sample_weight])) def test_sample_weight_invariance(n_samples=50): random_state = check_random_state(0) # binary output random_state = check_random_state(0) y_true = random_state.randint(0, 2, size=(n_samples, )) y_pred = random_state.randint(0, 2, size=(n_samples, )) y_score = random_state.random_sample(size=(n_samples,)) for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_MULTICLASS): continue metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield check_sample_weight_invariance, name, metric, y_true, y_score else: yield check_sample_weight_invariance, name, metric, y_true, y_pred # multiclass random_state = check_random_state(0) y_true = random_state.randint(0, 5, size=(n_samples, )) y_pred = random_state.randint(0, 5, size=(n_samples, )) y_score = random_state.random_sample(size=(n_samples, 5)) for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_MULTICLASS): continue metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield check_sample_weight_invariance, name, metric, y_true, y_score else: yield check_sample_weight_invariance, name, metric, y_true, y_pred # multilabel indicator _, ya = make_multilabel_classification(n_features=1, n_classes=20, random_state=0, n_samples=100, allow_unlabeled=False) _, yb = make_multilabel_classification(n_features=1, n_classes=20, random_state=1, n_samples=100, allow_unlabeled=False) y_true = np.vstack([ya, yb]) y_pred = np.vstack([ya, ya]) y_score = random_state.randint(1, 4, size=y_true.shape) for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS + MULTIOUTPUT_METRICS): if name in METRICS_WITHOUT_SAMPLE_WEIGHT: continue metric = ALL_METRICS[name] if name in THRESHOLDED_METRICS: yield (check_sample_weight_invariance, name, metric, y_true, y_score) else: yield (check_sample_weight_invariance, name, metric, y_true, y_pred) def test_no_averaging_labels(): # test labels argument when not using averaging # in multi-class and multi-label cases y_true_multilabel = 
np.array([[1, 1, 0, 0], [1, 1, 0, 0]]) y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]]) y_true_multiclass = np.array([0, 1, 2]) y_pred_multiclass = np.array([0, 2, 3]) labels = np.array([3, 0, 1, 2]) _, inverse_labels = np.unique(labels, return_inverse=True) for name in METRICS_WITH_AVERAGING: for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass], [y_true_multilabel, y_pred_multilabel]]: if name not in MULTILABELS_METRICS and y_pred.shape[1] > 0: continue metric = ALL_METRICS[name] score_labels = metric(y_true, y_pred, labels=labels, average=None) score = metric(y_true, y_pred, average=None) assert_array_equal(score_labels, score[inverse_labels])
bsd-3-clause
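The invariance tests above all follow one pattern: recompute a metric on a permuted or re-encoded copy of the inputs and require the score to be unchanged. A minimal stand-alone version of the sample-order check for a single metric, assuming scikit-learn is installed:

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)

# shuffle both arrays with the same permutation
y_true_s, y_pred_s = shuffle(y_true, y_pred, random_state=0)

# the score depends only on the (y_true, y_pred) pairs, not on their order
assert np.isclose(accuracy_score(y_true, y_pred),
                  accuracy_score(y_true_s, y_pred_s))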
pompiduskus/scikit-learn
sklearn/svm/setup.py
321
3157
import os
from os.path import join

import numpy

from sklearn._build_utils import get_blas_info


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration('svm', parent_package, top_path)

    config.add_subpackage('tests')

    # Section LibSVM

    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )

    libsvm_sources = ['libsvm.c']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]

    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )

    ### liblinear module
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    liblinear_sources = ['liblinear.c',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]

    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args', []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         **blas_info)

    ## end liblinear module

    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.c']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm", "libsvm_sparse_helper.c")])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
bsd-3-clause
yunfeilu/scikit-learn
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
294
3417
""" ======================================== Label Propagation digits active learning ======================================== Demonstrates an active learning technique to learn handwritten digits using label propagation. We start by training a label propagation model with only 10 labeled points, then we select the top five most uncertain points to label. Next, we train with 15 labeled points (original 10 + 5 new ones). We repeat this process four times to have a model trained with 30 labeled examples. A plot will appear showing the top 5 most uncertain digits for each iteration of training. These may or may not contain mistakes, but we will train the next model with their true labels. """ print(__doc__) # Authors: Clay Woolam <[email protected]> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import label_propagation from sklearn.metrics import classification_report, confusion_matrix digits = datasets.load_digits() rng = np.random.RandomState(0) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:330]] y = digits.target[indices[:330]] images = digits.images[indices[:330]] n_total_samples = len(y) n_labeled_points = 10 unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:] f = plt.figure() for i in range(5): y_train = np.copy(y) y_train[unlabeled_indices] = -1 lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_indices] true_labels = y[unlabeled_indices] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print('Iteration %i %s' % (i, 70 * '_')) print("Label Spreading model: %d labeled & %d unlabeled (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # compute the entropies of transduced label distributions pred_entropies = stats.distributions.entropy( lp_model.label_distributions_.T) # select five digit examples that the classifier is most uncertain about uncertainty_index = uncertainty_index = np.argsort(pred_entropies)[-5:] # keep track of indices that we get labels for delete_indices = np.array([]) f.text(.05, (1 - (i + 1) * .183), "model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(5, 5, index + 1 + (5 * i)) sub.imshow(image, cmap=plt.cm.gray_r) sub.set_title('predict: %i\ntrue: %i' % ( lp_model.transduction_[image_index], y[image_index]), size=10) sub.axis('off') # labeling 5 points, remote from labeled set delete_index, = np.where(unlabeled_indices == image_index) delete_indices = np.concatenate((delete_indices, delete_index)) unlabeled_indices = np.delete(unlabeled_indices, delete_indices) n_labeled_points += 5 f.suptitle("Active learning with Label Propagation.\nRows show 5 most " "uncertain labels to learn with the next model.") plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45) plt.show()
bsd-3-clause
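The active-learning example above picks new points to label by computing the entropy of the transduced label distributions and keeping the highest-entropy samples. The fragment below isolates just that selection step on a small made-up probability matrix; the numbers are invented for illustration, and only the scipy entropy call and the argsort pattern are taken from the example.

import numpy as np
from scipy import stats

# Hypothetical label distributions for 4 unlabeled samples over 3 classes
# (rows sum to 1); these values are made up for illustration only.
label_distributions = np.array([[0.98, 0.01, 0.01],
                                [0.40, 0.35, 0.25],
                                [0.70, 0.20, 0.10],
                                [0.34, 0.33, 0.33]])

# entropy() reduces column-wise, so transpose as the example above does
pred_entropies = stats.distributions.entropy(label_distributions.T)

# indices of the 2 most uncertain samples (highest entropy last)
most_uncertain = np.argsort(pred_entropies)[-2:]
print(most_uncertain)  # the near-uniform rows: [1 3]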
bfollinprm/Nquintessence
cosmoslik/cosmoslik_plugins/likelihoods/spt_lowl/spt_lowl.py
1
3759
from numpy import array, fromstring, loadtxt, dot, arange, diag, hstack, zeros from scipy.linalg import cho_factor, cho_solve from cosmoslik import SlikPlugin import os.path as osp from itertools import takewhile class spt_lowl(SlikPlugin): def __init__(self, which=None, lmin=None, **kwargs): super(spt_lowl,self).__init__(**kwargs) if which=='s12': newdat_file = 'data/s12/spt_lps12_20120828/Spectrum_spt2500deg2_lps12_alternativeCalibrationImplementation.newdat' elif which=='k11': newdat_file = 'data/k11/bandpowers/Spectrum_spt20082009.newdat' else: raise ValueError("spt_lowl: 'which' must be one of ['s12','k11']") newdat_file = osp.join(osp.dirname(__file__),newdat_file) #Load spectrum and covariance with open(newdat_file) as f: window_dir = osp.dirname(f.readline()) while 'TT' not in f.readline(): pass self.spec=array([fromstring(f.readline(),sep=' ')[1] for _ in range(47)]) self.sigma=array([fromstring(f.readline(),sep=' ') for _ in range(94)])[47:] #Load windows self.windows = [loadtxt(osp.join(osp.dirname(newdat_file),'windows',window_dir,'window_%i'%i))[:,1] for i in range(1,48)] if lmin is not None: bmin = sum(1 for _ in takewhile(lambda x: x<lmin, (sum(1 for _ in takewhile(lambda x: abs(x)<.001,w) ) for w in self.windows))) self.spec = self.spec[bmin:] self.sigma = self.sigma[bmin:,bmin:] self.windows = self.windows[bmin:] self.cho_sigma = cho_factor(self.sigma) self.windowrange = (lambda x: slice(min(x),max(x)+1))(loadtxt(osp.join(osp.dirname(newdat_file),'windows',window_dir,'window_1'))[:,0]) self.lmax = self.windowrange.stop self.ells = array([dot(arange(10000)[self.windowrange],w) for w in self.windows]) self.freq = {'dust':154, 'radio': 151, 'tsz':153} self.fluxcut = 50 def __call__(self, cmb, egfs, acalib=1): cl = self.get_cl_model(cmb,egfs) #Apply windows and calculate likelihood dcl = acalib*self.spec-cl return dot(dcl,cho_solve(self.cho_sigma, dcl))/2 def plot(self,p, cl=None, ax=None,fig=None, residuals=False, show_comps=True, comps_kw={}): if cl==None: cl = self.get_cl_model(p['_model']) if ax is None: if fig==None: from matplotlib.pyplot import figure fig=figure() ax=fig.add_subplot(111) if not residuals: ax.errorbar(self.ells,self.spec,yerr=diag(self.sigma[0]),fmt='.',label='SPT K11') ax.plot(self.ells,cl) if show_comps: ax.plot(p['_model']['cl_TT'],c='b') p['_model']['egfs']('cl_TT', lmax=self.lmax, freqs=(self.freq,self.freq), fluxcut=self.fluxcut, plot=True, ax=ax, **comps_kw) else: ax.errorbar(self.ells,self.spec-cl,yerr=diag(self.sigma[0]),fmt='.',label='SPT K11') ax.plot([self.ells[0],self.ells[-1]],[0]*2) def get_cl_model(self, cmb, egfs): #Get CMB + foreground model cl = (hstack([cmb['cl_TT'],zeros(self.lmax)])[:self.lmax] + egfs(spectra='cl_TT', lmax=self.lmax, freqs=(self.freq,self.freq), fluxcut=self.fluxcut)) #Apply window functions return array([dot(cl[self.windowrange],w) for w in self.windows])
mit
Pyomo/PyomoGallery
row_generation_mst/mst.py
1
3163
import pyomo
import pyomo.opt
import pyomo.environ as pe
import pandas
import networkx


class MSTRowGeneration:
    """A class to find Minimum Spanning Tree using a row-generation algorithm."""

    def __init__(self, nfile):
        """The input is a CSV file describing the undirected network's edges."""
        self.df = pandas.read_csv(nfile)

        self.createRelaxedModel()

    def createRelaxedModel(self):
        """Create the relaxed model, without any subtour elimination constraints."""
        df = self.df
        node_set = set(list(df.startNode) + list(df.destNode))

        # Create the model and sets
        m = pe.ConcreteModel()

        df.set_index(['startNode', 'destNode'], inplace=True)
        edge_set = df.index.unique()

        m.edge_set = pe.Set(initialize=edge_set, dimen=2)
        m.node_set = pe.Set(initialize=node_set)

        # Define variables
        m.Y = pe.Var(m.edge_set, domain=pe.Binary)

        # Objective
        def obj_rule(m):
            return sum(m.Y[e] * df.ix[e, 'dist'] for e in m.edge_set)
        m.OBJ = pe.Objective(rule=obj_rule, sense=pe.minimize)

        # Add the n-1 constraint
        def simple_const_rule(m):
            return sum(m.Y[e] for e in m.edge_set) == len(node_set) - 1

        m.simpleConst = pe.Constraint(rule=simple_const_rule)

        # Empty constraint list for subtour elimination constraints
        # This is where the generated rows will go
        m.ccConstraints = pe.ConstraintList()

        self.m = m

    def convertYsToNetworkx(self):
        """Convert the model's Y variables into a networkx object."""
        ans = networkx.Graph()
        edges = [e for e in self.m.edge_set if self.m.Y[e].value > .99]
        ans.add_edges_from(edges)
        return ans

    def solve(self):
        """Solve for the MST, using row generation for subtour elimination constraints."""
        def createConstForCC(m, cc):
            cc = dict.fromkeys(cc)
            return sum(m.Y[e] for e in m.edge_set
                       if ((e[0] in cc) and (e[1] in cc))) <= len(cc) - 1

        if not hasattr(self, 'solver'):
            solver = pyomo.opt.SolverFactory('gurobi')

        done = False
        while not done:
            # Solve once and add subtour elimination constraints if necessary
            # Finish when there are no more subtours
            results = solver.solve(self.m, tee=False, keepfiles=False,
                                   options_string="mip_tolerances_integrality=1e-9 mip_tolerances_mipgap=0")
            # Construct a graph from the answer, and look for subtours
            graph = self.convertYsToNetworkx()
            ccs = list(networkx.connected_component_subgraphs(graph))
            for cc in ccs:
                print('Adding constraint for connected component:')
                print(cc.nodes())
                print(createConstForCC(self.m, cc))
                print('--------------\n')
                self.m.ccConstraints.add(createConstForCC(self.m, cc))
            if ccs[0].number_of_nodes() == len(self.m.node_set):
                done = True

mst = MSTRowGeneration('mst.csv')
mst.solve()

mst.m.Y.pprint()
print(mst.m.OBJ())
bsd-2-clause
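MSTRowGeneration above reads its graph from a CSV with startNode, destNode and dist columns, and its solve() method asks for a Gurobi solver by name. A minimal sketch of how such an input file could be prepared with pandas follows; the four-node graph is invented for illustration, and actually running the class additionally requires Pyomo and a working MIP solver.

import pandas as pd

# A small made-up undirected graph: 4 nodes, 5 weighted edges.
edges = pd.DataFrame({
    'startNode': ['a', 'a', 'b', 'b', 'c'],
    'destNode':  ['b', 'c', 'c', 'd', 'd'],
    'dist':      [1.0, 4.0, 2.0, 5.0, 3.0],
})
edges.to_csv('mst.csv', index=False)

# With Pyomo and a solver installed, the class above would then be driven as:
# mst = MSTRowGeneration('mst.csv')
# mst.solve()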
andysim/psi4
psi4/driver/qcdb/psivardefs.py
3
14794
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2017 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # try: from collections import OrderedDict except ImportError: from .oldpymodules import OrderedDict def sapt_psivars(): """Returns dictionary of PsiVariable definitions. """ pv1 = OrderedDict() pv1['SAPT EXCHSCAL1'] = {'func': lambda x: 1.0 if x[0] < 1.0e-5 else x[0] / x[1], 'args': ['SAPT EXCH10 ENERGY', 'SAPT EXCH10(S^2) ENERGY']} # special treatment in pandas pv1['SAPT EXCHSCAL3'] = {'func': lambda x: x[0] ** 3, 'args': ['SAPT EXCHSCAL1']} pv1['SAPT EXCHSCAL'] = {'func': lambda x: x[0] ** x[1], 'args': ['SAPT EXCHSCAL1', 'SAPT ALPHA']} pv1['SAPT HF(2) ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]), 'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SAPT HF(2) ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SAPT HF(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']} pv1['SAPT MP2(2) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[3] + x[4] + x[0] * (x[5] + x[6] + x[7] + x[8])), 'args': ['SAPT EXCHSCAL', 'SAPT MP2 CORRELATION ENERGY', 'SAPT ELST12,R ENERGY', # MP2 CORRELATION ENERGY renamed here from pandas since this is IE # renamed again SA --> SAPT 'SAPT IND22 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT EXCH-DISP20 ENERGY']} pv1['SAPT MP2(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT MP2(2) ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} pv1['SAPT MP4 DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4] + x[5], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT DISP21 ENERGY', 'SAPT DISP22(SDQ) ENERGY', 'SAPT EST.DISP22(T) ENERGY']} pv1['SAPT CCD DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP2(CCD) ENERGY', 'SAPT DISP22(S)(CCD) ENERGY', 'SAPT EST.DISP22(T)(CCD) ENERGY']} pv1['SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY']} pv1['SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT EXCH10 ENERGY']} pv1['SAPT0 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT EXCHSCAL', 'SAPT 
EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY', 'SAPT0 EXCH ENERGY', 'SAPT0 IND ENERGY', 'SAPT0 DISP ENERGY']} pv1['SSAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']} pv1['SSAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']} pv1['SSAPT0 IND ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2], 'args': ['SAPT EXCHSCAL3', 'SAPT0 IND ENERGY', 'SAPT EXCH-IND20,R ENERGY']} pv1['SSAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT EXCHSCAL3', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SSAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SSAPT0 ELST ENERGY', 'SSAPT0 EXCH ENERGY', 'SSAPT0 IND ENERGY', 'SSAPT0 DISP ENERGY']} pv1['SCS-SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']} pv1['SCS-SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']} pv1['SCS-SAPT0 IND ENERGY'] = {'func': sum, 'args': ['SAPT0 IND ENERGY']} pv1['SCS-SAPT0 DISP ENERGY'] = {'func': lambda x: (x[0] - x[3]) * (x[1] + x[2]) + x[3] * (x[4] + x[5]), 'args': [0.66, 'SAPT SAME-SPIN EXCH-DISP20 ENERGY', 'SAPT SAME-SPIN DISP20 ENERGY', 1.2, 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} # note no xs for SCS disp pv1['SCS-SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SCS-SAPT0 ELST ENERGY', 'SCS-SAPT0 EXCH ENERGY', 'SCS-SAPT0 IND ENERGY', 'SCS-SAPT0 DISP ENERGY']} pv1['SAPT2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']} pv1['SAPT2 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2], 'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} pv1['SAPT2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2 ELST ENERGY', 'SAPT2 EXCH ENERGY', 'SAPT2 IND ENERGY', 'SAPT2 DISP ENERGY']} pv1['SAPT2+ ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']} pv1['SAPT2+ EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+ IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2+ DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP']} pv1['SAPT2+ TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY', 'SAPT2+ EXCH ENERGY', 'SAPT2+ IND ENERGY', 'SAPT2+ DISP ENERGY']} pv1['SAPT2+(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']} pv1['SAPT2+(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY']} pv1['SAPT2+(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP']} pv1['SAPT2+(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) ELST ENERGY', 'SAPT2+(CCD) EXCH ENERGY', 'SAPT2+(CCD) IND ENERGY', 'SAPT2+(CCD) DISP ENERGY']} pv1['SAPT2+DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH 
ENERGY']} pv1['SAPT2+DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY', 'SAPT MP2(2) ENERGY']} pv1['SAPT2+DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+ DISP ENERGY']} pv1['SAPT2+DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 ELST ENERGY', 'SAPT2+DMP2 EXCH ENERGY', 'SAPT2+DMP2 IND ENERGY', 'SAPT2+DMP2 DISP ENERGY']} pv1['SAPT2+(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']} pv1['SAPT2+(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']} pv1['SAPT2+(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 IND ENERGY']} pv1['SAPT2+(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) DISP ENERGY']} pv1['SAPT2+(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD)DMP2 ELST ENERGY', 'SAPT2+(CCD)DMP2 EXCH ENERGY', 'SAPT2+(CCD)DMP2 IND ENERGY', 'SAPT2+(CCD)DMP2 DISP ENERGY']} pv1['SAPT2+(3) ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']} pv1['SAPT2+(3) EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+(3) IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']} pv1['SAPT2+(3) DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP', 'SAPT DISP30 ENERGY']} pv1['SAPT2+(3) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY', 'SAPT2+(3) EXCH ENERGY', 'SAPT2+(3) IND ENERGY', 'SAPT2+(3) DISP ENERGY']} pv1['SAPT2+(3)(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY']} pv1['SAPT2+(3)(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP', 'SAPT DISP30 ENERGY']} pv1['SAPT2+(3)(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) ELST ENERGY', 'SAPT2+(3)(CCD) EXCH ENERGY', 'SAPT2+(3)(CCD) IND ENERGY', 'SAPT2+(3)(CCD) DISP ENERGY']} pv1['SAPT2+(3)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY', 'SAPT MP2(2) ENERGY']} pv1['SAPT2+(3)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) DISP ENERGY']} pv1['SAPT2+(3)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 ELST ENERGY', 'SAPT2+(3)DMP2 EXCH ENERGY', 'SAPT2+(3)DMP2 IND ENERGY', 'SAPT2+(3)DMP2 DISP ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 IND ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) DISP ENERGY']} pv1['SAPT2+(3)(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD)DMP2 ELST ENERGY', 'SAPT2+(3)(CCD)DMP2 EXCH ENERGY', 'SAPT2+(3)(CCD)DMP2 IND ENERGY', 'SAPT2+(3)(CCD)DMP2 DISP ENERGY']} pv1['SAPT2+3 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']} pv1['SAPT2+3 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]), 'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT 
EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']} pv1['SAPT2+3 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5] + x[6] + x[0] * x[7], 'args': ['SAPT EXCHSCAL', 'SAPT HF(3) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY', 'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']} pv1['SAPT2+3 DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT MP4 DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} pv1['SAPT2+3 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY', 'SAPT2+3 EXCH ENERGY', 'SAPT2+3 IND ENERGY', 'SAPT2+3 DISP ENERGY']} pv1['SAPT2+3(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY']} pv1['SAPT2+3(CCD) DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5], 'args': ['SAPT EXCHSCAL', 'SAPT CCD DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']} pv1['SAPT2+3(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) ELST ENERGY', 'SAPT2+3(CCD) EXCH ENERGY', 'SAPT2+3(CCD) IND ENERGY', 'SAPT2+3(CCD) DISP ENERGY']} pv1['SAPT2+3DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY', 'SAPT MP2(3) ENERGY']} pv1['SAPT2+3DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3 DISP ENERGY']} pv1['SAPT2+3DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 ELST ENERGY', 'SAPT2+3DMP2 EXCH ENERGY', 'SAPT2+3DMP2 IND ENERGY', 'SAPT2+3DMP2 DISP ENERGY']} pv1['SAPT2+3(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']} pv1['SAPT2+3(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']} pv1['SAPT2+3(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 IND ENERGY']} pv1['SAPT2+3(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) DISP ENERGY']} pv1['SAPT2+3(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD)DMP2 ELST ENERGY', 'SAPT2+3(CCD)DMP2 EXCH ENERGY', 'SAPT2+3(CCD)DMP2 IND ENERGY', 'SAPT2+3(CCD)DMP2 DISP ENERGY']} return pv1
gpl-2.0
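sapt_psivars() above returns an ordered mapping from composite PsiVariable names to a reducing function plus the argument names (or literal numbers) it consumes, listed in dependency order. One possible way a caller might evaluate such definitions is sketched below; the evaluate_psivars helper and the base values are illustrative assumptions, while the two dictionary entries are copied from the function above.

from collections import OrderedDict

def evaluate_psivars(definitions, known):
    """Fill in composite variables whose arguments are already known.

    `definitions` maps name -> {'func': callable, 'args': [names or numbers]};
    `known` maps variable names to float values. A single pass suffices
    because the definitions are listed in dependency order.
    """
    values = dict(known)
    for name, spec in definitions.items():
        try:
            args = [a if isinstance(a, (int, float)) else values[a]
                    for a in spec['args']]
        except KeyError:
            continue  # some base quantity is missing; skip this composite
        values[name] = spec['func'](args)
    return values

# Made-up base values, for illustration only (not physical results):
base = {'SAPT EXCH10 ENERGY': 0.010,
        'SAPT EXCH10(S^2) ENERGY': 0.009,
        'SAPT ALPHA': 3.0}

defs = OrderedDict()
defs['SAPT EXCHSCAL1'] = {'func': lambda x: 1.0 if x[0] < 1.0e-5 else x[0] / x[1],
                          'args': ['SAPT EXCH10 ENERGY', 'SAPT EXCH10(S^2) ENERGY']}
defs['SAPT EXCHSCAL'] = {'func': lambda x: x[0] ** x[1],
                         'args': ['SAPT EXCHSCAL1', 'SAPT ALPHA']}

print(evaluate_psivars(defs, base)['SAPT EXCHSCAL'])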
rahul-c1/scikit-learn
doc/conf.py
11
8021
# -*- coding: utf-8 -*- # # scikit-learn documentation build configuration file, created by # sphinx-quickstart on Fri Jan 8 09:13:42 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import print_function import sys import os from sklearn.externals.six import u # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath('sphinxext')) from github_link import make_linkcode_resolve # -- General configuration --------------------------------------------------- # Try to override the matplotlib configuration as early as possible try: import gen_rst except: pass # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['gen_rst', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.pngmath', 'numpy_ext.numpydoc', 'sphinx.ext.linkcode', ] autosummary_generate = True autodoc_default_flags = ['members', 'inherited-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # generate autosummary even if no references autosummary_generate = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # Generate the plots for the gallery plot_gallery = True # The master toctree document. master_doc = 'index' # General information about the project. project = u('scikit-learn') copyright = u('2010 - 2014, scikit-learn developers (BSD License)') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.16-git' # The full version, including alpha/beta/rc tags. import sklearn release = sklearn.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be # searched for source files. exclude_trees = ['_build', 'templates', 'includes'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'scikit-learn' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'oldversion': False, 'collapsiblesidebar': True, 'google_analytics': True, 'surveybanner': False, 'sprintbanner': True} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'scikit-learn' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'logos/scikit-learn-logo-small.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'logos/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['images'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'scikit-learndoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'), u('scikit-learn developers'), 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = "logos/scikit-learn-logo.png" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. 
latex_preamble = r""" \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} \usepackage{enumitem} \setlistdepth{10} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True trim_doctests_flags = True def setup(app): # to hide/show the prompt in code examples: app.add_javascript('js/copybutton.js') # to format example galleries: app.add_javascript('js/examples.js') # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve('sklearn', u'https://github.com/scikit-learn/' 'scikit-learn/blob/{revision}/' '{package}/{path}#L{lineno}')
bsd-3-clause
RapidApplicationDevelopment/tensorflow
tensorflow/contrib/learn/python/learn/experiment.py
5
16349
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Experiment class collecting information needed for a single training run.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import math import time from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import monitors from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import server_lib __all__ = ["Experiment"] class Experiment(object): """Experiment is a class containing all information needed to train a model. After an experiment is created (by passing an Estimator and inputs for training and evaluation), an Experiment instance knows how to invoke training and eval loops in a sensible fashion for distributed training. """ # TODO(ispir): remove delay_workers_by_global_step and make global step based # waiting as only behaviour. @deprecated_arg_values( "2016-10-23", "local_eval_frequency is deprecated as local_run will be renamed to " "train_and_evaluate. Use min_eval_frequency and call train_and_evaluate " "instead. Note, however, that the default for min_eval_frequency is 1, " "meaning models will be evaluated every time a new checkpoint is " "available. In contrast, the default for local_eval_frequency is None, " "resulting in evaluation occurring only after training has completed. " "min_eval_frequency is ignored when calling the deprecated local_run.", local_eval_frequency=None) def __init__(self, estimator, train_input_fn, eval_input_fn, eval_metrics=None, train_steps=None, eval_steps=100, train_monitors=None, local_eval_frequency=None, eval_delay_secs=120, continuous_eval_throttle_secs=60, min_eval_frequency=1, delay_workers_by_global_step=False): """Constructor for `Experiment`. Creates an Experiment instance. None of the functions passed to this constructor are executed at construction time. They are stored and used when a method is executed which requires it. Args: estimator: Object implementing `Trainable` and `Evaluable`. train_input_fn: function, returns features and labels for training. eval_input_fn: function, returns features and labels for evaluation. If `eval_steps` is `None`, this should be configured only to produce for a finite number of batches (generally, 1 epoch over the evaluation data). eval_metrics: `dict` of string, metric function. If `None`, default set is used. train_steps: Perform this many steps of training. 
`None`, the default, means train forever. eval_steps: `evaluate` runs until input is exhausted (or another exception is raised), or for `eval_steps` steps, if specified. train_monitors: A list of monitors to pass to the `Estimator`'s `fit` function. local_eval_frequency: Frequency of running eval in steps, when running locally. If `None`, runs evaluation only at the end of training. eval_delay_secs: Start evaluating after waiting for this many seconds. continuous_eval_throttle_secs: Do not re-evaluate unless the last evaluation was started at least this many seconds ago for continuous_eval(). min_eval_frequency: (applies only to train_and_evaluate). the minimum number of steps between evaluations. Of course, evaluation does not occur if no new snapshot is available, hence, this is the minimum. delay_workers_by_global_step: if `True` delays training workers based on global step instead of time. Raises: ValueError: if `estimator` does not implement `Evaluable` and `Trainable`. """ if not isinstance(estimator, evaluable.Evaluable): raise ValueError("`estimator` must implement `Evaluable`.") if not isinstance(estimator, trainable.Trainable): raise ValueError("`estimator` must implement `Trainable`.") super(Experiment, self).__init__() self._estimator = estimator self._train_input_fn = train_input_fn self._eval_input_fn = eval_input_fn self._eval_metrics = eval_metrics self._train_steps = train_steps self._eval_steps = eval_steps self._train_monitors = train_monitors or [] self._local_eval_frequency = local_eval_frequency self._eval_delay_secs = eval_delay_secs self._continuous_eval_throttle_secs = continuous_eval_throttle_secs self._min_eval_frequency = min_eval_frequency self._delay_workers_by_global_step = delay_workers_by_global_step @property def estimator(self): return self._estimator def train(self, delay_secs=None): """Fit the estimator using the training data. Train the estimator for `self._train_steps` steps, after waiting for `delay_secs` seconds. If `self._train_steps` is `None`, train forever. Args: delay_secs: Start training after this many seconds. Returns: The trained estimator. """ start = time.time() # Start the server, if needed. It's important to start the server before # we (optionally) sleep for the case where no device_filters are set. # Otherwise, the servers will wait to connect to each other before starting # to train. We might as well start as soon as we can. config = self._estimator.config if (config.environment != run_config.Environment.LOCAL and config.environment != run_config.Environment.GOOGLE and config.cluster_spec and config.master): self._start_server() extra_hooks = [] if delay_secs is None: task_id = self._estimator.config.task_id or 0 if self._delay_workers_by_global_step: # Wait 5500 global steps for the second worker. Each worker waits more # then previous one but with a diminishing number of steps. extra_hooks.append( basic_session_run_hooks.GlobalStepWaiterHook( int(8000.0 * math.log(task_id + 1)))) delay_secs = 0 else: # Wait 5 secs more for each new worker up to 60 secs. delay_secs = min(60, task_id * 5) if delay_secs > 0: elapsed_secs = time.time() - start remaining = delay_secs - elapsed_secs logging.info("Waiting %d secs before starting training.", remaining) time.sleep(delay_secs) return self._estimator.fit(input_fn=self._train_input_fn, max_steps=self._train_steps, monitors=self._train_monitors + extra_hooks) def evaluate(self, delay_secs=None): """Evaluate on the evaluation data. Runs evaluation on the evaluation data and returns the result. 
Runs for `self._eval_steps` steps, or if it's `None`, then run until input is exhausted or another exception is raised. Start the evaluation after `delay_secs` seconds, or if it's `None`, defaults to using `self._eval_delay_secs` seconds. Args: delay_secs: Start evaluating after this many seconds. If `None`, defaults to using `self._eval_delays_secs`. Returns: The result of the `evaluate` call to the `Estimator`. """ if delay_secs is None: delay_secs = self._eval_delay_secs if delay_secs: logging.info("Waiting %d secs before starting eval.", delay_secs) time.sleep(delay_secs) return self._estimator.evaluate(input_fn=self._eval_input_fn, steps=self._eval_steps, metrics=self._eval_metrics, name="one_pass") @deprecated( "2016-10-23", "local_run will be renamed to train_and_evaluate and the new default " "behavior will be to run evaluation every time there is a new " "checkpoint.") def local_run(self): with _new_attr_context(self, "_min_eval_frequency"): self._min_eval_frequency = self._local_eval_frequency return self.train_and_evaluate() def _continuous_eval(self, input_fn, name, delay_secs, throttle_delay_secs): """Run continuous eval. Runs infinite eval on the evaluation data set. This function starts evaluating after `delay_secs` seconds and then runs no more than one evaluation (with `self._eval_steps` steps each time) per `throttle_delay_secs`. It never returns. Args: input_fn: The input to use for this eval. name: A string appended to the folder name of evaluation results. delay_secs: Start evaluating after this many seconds. If None, defaults to self._eval_delay_secs. throttle_delay_secs: Do not re-evaluate unless the last evaluation was started at least this many seconds ago. If None, defaults to self._continuous_eval_throttle_secs. """ if delay_secs is None: delay_secs = self._eval_delay_secs if throttle_delay_secs is None: throttle_delay_secs = self._continuous_eval_throttle_secs if delay_secs: logging.info("Waiting %f secs before starting eval.", delay_secs) time.sleep(delay_secs) last_fitted_error_time = 0 while True: start = time.time() try: self._estimator.evaluate(input_fn=input_fn, steps=self._eval_steps, metrics=self._eval_metrics, name=name) except NotFittedError: # Print warning message every 10 mins. if time.time() - last_fitted_error_time > 600: logging.warning( "Estimator is not fitted yet. " "Will start an evaluation when a checkpoint will be ready.") last_fitted_error_time = time.time() duration = time.time() - start if duration < throttle_delay_secs: difference = throttle_delay_secs - duration logging.info("Waiting %f secs before starting next eval run.", difference) time.sleep(difference) def continuous_eval(self, delay_secs=None, throttle_delay_secs=None): self._continuous_eval(self._eval_input_fn, name="continuous", delay_secs=delay_secs, throttle_delay_secs=throttle_delay_secs) def continuous_eval_on_train_data(self, delay_secs=None, throttle_delay_secs=None): self._continuous_eval(self._train_input_fn, name="continuous_on_train_data", delay_secs=delay_secs, throttle_delay_secs=throttle_delay_secs) def train_and_evaluate(self): """Interleaves training and evaluation. The frequency of evaluation is controlled by the contructor arg `min_eval_frequency`. When this parameter is None or 0, evaluation happens only after training has completed. Note that evaluation cannot happen more frequently than checkpoints are taken. 
If no new snapshots are available when evaluation is supposed to occur, then evaluation doesn't happen for another `min_eval_frequency` steps (assuming a checkpoint is available at that point). Thus, settings `min_eval_frequency` to 1 means that the model will be evaluated everytime there is a new checkpoint. This is particular useful for a "Master" task in the cloud, whose responsibility it is to take checkpoints, evaluate those checkpoints, and write out summaries. Participating in training as the supervisor allows such a task to accomplish the first and last items, while performing evaluation allows for the second. Returns: The result of the `evaluate` call to the `Estimator`. """ # The directory to which evaluation summaries are written are determined # by adding a suffix to 'eval'; that suffix is the 'name' parameter to # the various evaluate(...) methods. By setting it to None, we force # the directory name to simply be 'eval'. eval_dir_suffix = None # We set every_n_steps to 1, but evaluation only occurs when a new # snapshot is available. If, by the time we finish evaluation # there is a new snapshot, then we just evaluate again. Otherwise, # we keep training until one becomes available. with _new_attr_context(self, "_train_monitors"): self._train_monitors = self._train_monitors or [] if self._min_eval_frequency: self._train_monitors += [monitors.ValidationMonitor( input_fn=self._eval_input_fn, eval_steps=self._eval_steps, metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency, name=eval_dir_suffix, )] self.train(delay_secs=0) return self._estimator.evaluate(input_fn=self._eval_input_fn, steps=self._eval_steps, metrics=self._eval_metrics, name=eval_dir_suffix) def run_std_server(self): """Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server. """ self._start_server().join() def test(self): """Tests training and evaluating the estimator both for a single step. Returns: The result of the `evaluate` call to the `Estimator`. """ self._estimator.fit(input_fn=self._train_input_fn, steps=1, monitors=self._train_monitors) return self._estimator.evaluate(input_fn=self._eval_input_fn, steps=1, metrics=self._eval_metrics, name="one_pass") def _start_server(self): """Creates, starts, and returns a server_lib.Server.""" config = self._estimator.config if (not config.cluster_spec or not config.task_type or not config.master or config.task_id is None): raise ValueError("Could not start server; be sure to specify " "cluster_spec, task_type, master, and task in " "RunConfig or set the TF_CONFIG environment variable.") server = server_lib.Server( config.cluster_spec, job_name=config.task_type, task_index=config.task_id, config=config.tf_config, start=False) server.start() return server @contextlib.contextmanager def _new_attr_context(obj, attr): """Creates a new context in which an object's attribute can be changed. This creates a context in which an object's attribute can be changed. Once the context is exited, the attribute reverts to its original value. Example usage: my_obj.x = 1 with _new_attr_context(my_obj, "x"): my_obj.x = 2 print(my_obj.x) print(my_obj.x) """ saved = getattr(obj, attr) try: yield finally: setattr(obj, attr, saved)
apache-2.0
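The file above closes with _new_attr_context, a small context manager that lets an attribute be changed temporarily and restores the saved value on exit; its docstring shows the intended usage. The snippet below is an illustrative re-implementation of the same pattern under a different name, temporary_attr, so it is a standalone sketch rather than the TensorFlow symbol itself.

import contextlib

@contextlib.contextmanager
def temporary_attr(obj, attr):
    """Temporarily allow `obj.attr` to be changed; restore it on exit."""
    saved = getattr(obj, attr)
    try:
        yield
    finally:
        setattr(obj, attr, saved)

class Thing(object):
    x = 1

t = Thing()
with temporary_attr(t, 'x'):
    t.x = 2
    print(t.x)  # 2 inside the context
print(t.x)      # back to 1 afterwards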
Nedgang/logol_analyse
analyse_logol.py
1
5660
# -*- coding: utf-8 -*- """ logol_analyse provide some analyse tools for logol xml results. Without any option, it will provide the number of hit, how many sequences have at least one hit, and a graph with the repartition of the hits. Usage: logol_analyse.py <input> <data> [options] options: --graph, -g=<name> The graph name, to save it directly. --help, -h It call help. UNBELIEVABLE!!!!! --nograph -n No graph creation --origin, -o INT The 0 emplacement on sequences [default: 150] --position -p=<name> Return a file containing position of each motif --result -r=<name> Save a fasta file with the matched sequences. --signature, -s=<name> Create a file with for each sequences the hits. --hits, -t Display a hits/sequences graph. --version, -v Maybe it's a trap ^^ --xclude, -x=<name> Create a file containing all unmatched sequences """ ########## # IMPORT # ########## import matplotlib.pyplot as plt import pylab import glob import os from docopt import docopt from lxml import etree from Bio import SeqIO ############# # ARGUMENTS # ############# if __name__ == '__main__': arguments = docopt(__doc__, version = '1.3') ######## # MAIN # ######## def __main__(arguments): total = 0 count = 0 hit = [] se = set() # Contain sequences header hits_per_seq = [] # Here we check all the .xml file for f in glob.glob(os.getcwd()+"/"+arguments['<input>']+"*.xml"): nb_hit = 0 total += 1 tree = etree.parse(f) # Collect of the hit beginning and ID for seq in tree.xpath("/sequences/match/begin"): count += 1 nb_hit +=1 hit.append(int(seq.text)-int(arguments['--origin'])) [se.add(a.text) for a in tree.xpath("/sequences/fastaHeader")] if nb_hit > 0: hits_per_seq.append(nb_hit) print("Nombre de hits: "+str(count)) print("Nombre de séquences touchées: "+str(len(se))+" sur "+str(total)) print("Nombre max de hits par séquences: "+str(max(hits_per_seq))) if arguments['--result'] != None: seq_match(se) if arguments['--xclude'] != None: seq_no_match(se) if arguments['--nograph'] == False: graph(hit) if arguments['--signature'] != None: save_signature() if arguments['--position'] != None: save_position() if arguments['--hits'] != False: display_hits(hits_per_seq) ############# # FUNCTIONS # ############# def seq_match(seq): out = open(os.getcwd()+'/'+arguments['--result'], 'w') data = open(os.getcwd()+'/'+arguments['<data>'], "rU") for s in SeqIO.parse(data, "fasta"): if s.id in seq: out.write(s.format("fasta")) out.close() data.close() def seq_no_match(seq): out = open(os.getcwd()+'/'+arguments['--xclude'], 'w') data = open(os.getcwd()+'/'+arguments['<data>'], "rU") for s in SeqIO.parse(data, "fasta"): if s.id not in seq: out.write(s.format("fasta")) out.close() data.close() def graph(hit): plt.hist(hit, range(min(hit), max(hit))) plt.xticks(range(min(hit), max(hit), 10)) plt.xlabel("Emplacement des hits sur les séquences") plt.ylabel("Nombre de hits") if arguments['--graph'] != None: plt.savefig(arguments['--graph']+'.png') pylab.close() else: plt.show() def save_signature(): sign = open(os.getcwd()+'/'+arguments['--signature'], 'w') for f in glob.glob(os.getcwd()+"/"+arguments['<input>']+"*"): fr = [] # Will have the last char of var, which is frag nb c = 0 tree = etree.parse(f) if tree.xpath("/sequences/match/variable") != []: [sign.write('>'+h.text+'\n') for h in tree.xpath("/sequences/fastaHeader")] [fr.append((int(i.get("name")[-1]))) for i in tree.xpath("/sequences/match/variable")] m = max(fr) # Fragments number to have the complete match for i in tree.xpath("/sequences/match/variable/content"): c += 1 
sign.write(i.text) if c >= m: sign.write("\n") c = 0 sign.close() def save_position(): begin = [] # Will contain all the begining number end = [] seq = [] # Will contain all the sequences found iD = [] # Will contair the sequences ID n = 0 # nb of line we will have to write i = 0 pos = open(os.getcwd()+'/'+arguments['--position'], 'w') pos.write("ID\tbegin\tsequence\tend\n") for f in glob.glob(os.getcwd()+"/"+arguments['<input>']+"*"): tree = etree.parse(f) for s in tree.xpath("/sequences/match/variable/content"): n += 1 seq.append(s.text) [iD.append(h.text) for h in tree.xpath("/sequences/fastaHeader")] for b in tree.xpath("/sequences/match/variable/begin"): begin.append(str(b.text)) for e in tree.xpath("/sequences/match/variable/end"): end.append(str(e.text)) # Now, we write those info into the file while i < n: pos.write(iD[i]+"\t"+begin[i]+"\t"+seq[i]+"\t"+end[i]+"\n") i += 1 pos.close() def display_hits(hits_per_seq): plt.hist(hits_per_seq, range(min(hits_per_seq), max(hits_per_seq))) plt.xticks(range(min(hits_per_seq), max(hits_per_seq), 1)) plt.xlabel("Nombre de hits par séquences") plt.ylabel("Nombre de séquences") plt.show() ########## # LAUNCH # ########## __main__(arguments)
gpl-2.0
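analyse_logol.py above pulls hit positions and sequence headers out of logol XML results with XPath queries such as /sequences/match/begin, shifting each begin position by the --origin value. The snippet below replays that parsing step on an inline XML fragment; the fragment is invented to mimic the structure those XPath expressions imply and is not real logol output.

from lxml import etree

xml = b"""<sequences>
  <fastaHeader>seq_001</fastaHeader>
  <match><begin>160</begin><end>172</end></match>
  <match><begin>201</begin><end>213</end></match>
</sequences>"""

tree = etree.fromstring(xml)
origin = 150  # plays the same role as the --origin option above
hits = [int(b.text) - origin for b in tree.xpath("/sequences/match/begin")]
header = tree.xpath("/sequences/fastaHeader")[0].text
print(header, hits)  # seq_001 [10, 51]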
mprelee/data-incubator-capstone
src/gender_age_compare.py
1
4327
import pandas as pd import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns import pickle from scipy import stats from config import MAIN_DATA, AGE_GENDER_VIOLIN_PLOT, AGE_GENDER_BOX_PLOT,\ AGE_VIOLIN_PLOT, AGE_BOX_PLOT from preprocessing import default_preprocess AGE_PLOT_POSITIONS = [1,0,2] AGE_PALETTE = sns.color_palette() AGE_PLOT_ORDER = ['child','adult','elderly'] AGE_GENDER_PALETTE = sns.color_palette('Paired') AGE_GENDER_PLOT_ORDER = [\ ('child','F'),\ ('child','M'),\ ('adult','F'),\ ('adult','M'),\ ('elderly','F'),\ ('elderly','M') ] # Load data df = default_preprocess(pickle.load(open(MAIN_DATA,'rb'))) # Print mean stats print 'Mean fund time by age_group, gender' print df.groupby('age_group')['fund_time'].mean() print 'Mean fund time by age_group, gender' print df.groupby(['age_group','gender'])['fund_time'].mean() # Plot by age group plt.figure(1) sns.violinplot(df.fund_time,df.age_group,alpha=0.5,inner='box',\ color=AGE_PALETTE,order=AGE_PLOT_ORDER) plt.title('Fund Time (days) by age_group') plt.savefig(AGE_VIOLIN_PLOT,format='png') plt.figure(2) sns.boxplot(df.fund_time,df.age_group,alpha=0.5,\ color=AGE_PALETTE,order=AGE_PLOT_ORDER) plt.title('Fund Time (days) by age_group') plt.savefig(AGE_BOX_PLOT,format='png') plt.figure(3) sns.violinplot(df.fund_time,[df.age_group,df.gender],alpha=0.5,inner='box',\ color=AGE_GENDER_PALETTE,order=AGE_GENDER_PLOT_ORDER) plt.title('Fund Time (days) by age_group, gender') plt.savefig(AGE_GENDER_VIOLIN_PLOT,format='png') plt.figure(4) sns.boxplot(df.fund_time,[df.age_group,df.gender],alpha=0.5,\ color=AGE_GENDER_PALETTE,order=AGE_GENDER_PLOT_ORDER) plt.title('Fund Time (days) by age_group, gender') plt.savefig(AGE_GENDER_BOX_PLOT,format='png') # Analyze by age group first child_ft = df[df.age_group == 'child']['fund_time'] adult_ft = df[df.age_group == 'adult']['fund_time'] elderly_ft = df[df.age_group == 'elderly']['fund_time'] # Run 2-Sided Mann-Whitne U Test # https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test use_continuity = False u,prob_ca = stats.mannwhitneyu(child_ft, adult_ft, use_continuity=use_continuity) u,prob_ce = stats.mannwhitneyu(child_ft, elderly_ft, use_continuity=use_continuity) u,prob_ae = stats.mannwhitneyu(adult_ft, elderly_ft, use_continuity=use_continuity) # MULTIPLY RESULTS BY 2 print 'child vs. adult p-value: %.6f' % (2*prob_ca) print 'child vs. elderly p-value: %.6f' % (2*prob_ce) print 'adult vs. 
elderly p-value: %.6f' % (2*prob_ae) # Analyize by age group, gender m_child_ft = df[(df.age_group == 'child') & (df.gender == 'M')]['fund_time'] m_adult_ft = df[(df.age_group == 'adult') & (df.gender == 'M')]['fund_time'] m_elderly_ft = df[(df.age_group == 'elderly') & (df.gender == 'M')]['fund_time'] f_child_ft = df[(df.age_group == 'child') & (df.gender == 'F')]['fund_time'] f_adult_ft = df[(df.age_group == 'adult') & (df.gender == 'F')]['fund_time'] f_elderly_ft = df[(df.age_group == 'elderly') & (df.gender == 'F')]['fund_time'] u,prob_child = stats.mannwhitneyu(f_child_ft, m_child_ft, use_continuity=use_continuity) u,prob_adult = stats.mannwhitneyu(f_adult_ft, m_adult_ft, use_continuity=use_continuity) u,prob_elderly = stats.mannwhitneyu(f_elderly_ft, m_elderly_ft, use_continuity=use_continuity) print 'child gender comparison p-value: %.6f' % (2*prob_child) print 'adult gender comparison p-value: %.6f' % (2*prob_adult) print 'elderly gender comparison p-value: %.6f' % (2*prob_elderly) print 'Compare adult males' u,prob_fc = stats.mannwhitneyu(f_child_ft, m_adult_ft, use_continuity=use_continuity) u,prob_mc = stats.mannwhitneyu(m_child_ft, m_adult_ft, use_continuity=use_continuity) u,prob_fa = stats.mannwhitneyu(f_adult_ft, m_adult_ft, use_continuity=use_continuity) u,prob_fe = stats.mannwhitneyu(f_elderly_ft, m_adult_ft, use_continuity=use_continuity) u,prob_me = stats.mannwhitneyu(m_elderly_ft, m_adult_ft, use_continuity=use_continuity) print 'to female children, p-value: %.6f' % (2*prob_fc) print 'to male children, p-value: %.6f' % (2*prob_mc) print 'to female adults, p-value: %.6f' % (2*prob_fa) print 'to female elderly, p-value: %.6f' % (2*prob_fe) print 'to male elderly, p-value: %.6f' % (2*prob_me)
gpl-2.0
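gender_age_compare.py above relies on scipy's Mann-Whitney U test and doubles the returned p-value to obtain a two-sided test. The same pattern in isolation, on two small synthetic samples (the data below are random stand-ins, not the capstone data):

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
a = rng.exponential(scale=10.0, size=50)   # stand-in for one group's fund times
b = rng.exponential(scale=14.0, size=50)   # stand-in for the other group's

# Older scipy (as used above) returns a one-sided p-value, hence the
# "multiply by 2" convention in the script; recent scipy versions accept
# alternative='two-sided' directly.
u, p = stats.mannwhitneyu(a, b, use_continuity=False)
print('U = %.1f, p (as returned) = %.6f' % (u, p))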
lambokini/SmartSTrader
macd.py
1
11037
''' This code is copyright Harrison Kinsley. The open-source code is released under a BSD license: Copyright (c) 2013, Harrison Kinsley All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' ''' This code is copyright Harrison Kinsley. The open-source code is released under a BSD license: Copyright (c) 2013, Harrison Kinsley All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
http://sentdex.com/sentiment-analysisbig-data-and-python-tutorials-algorithmic-trading/python-matplotlib-sample-code-charting-stocks-python/ http://sentdex.com/sentiment-analysisbig-data-and-python-tutorials-algorithmic-trading/python-finance-indicators-stocks-forex-futures-options-indicators-built-python/ ''' import urllib2 import time import datetime import numpy as np import matplotlib.pyplot as plt import matplotlib.ticker as mticker import matplotlib.dates as mdates from matplotlib.finance import candlestick import matplotlib import pylab matplotlib.rcParams.update({'font.size': 9}) def rsiFunc(prices, n=14): deltas = np.diff(prices) seed = deltas[:n+1] up = seed[seed>=0].sum()/n down = -seed[seed<0].sum()/n rs = up/down rsi = np.zeros_like(prices) rsi[:n] = 100. - 100./(1.+rs) for i in range(n, len(prices)): delta = deltas[i-1] # cause the diff is 1 shorter if delta>0: upval = delta downval = 0. else: upval = 0. downval = -delta up = (up*(n-1) + upval)/n down = (down*(n-1) + downval)/n rs = up/down rsi[i] = 100. - 100./(1.+rs) return rsi def movingaverage(values,window): weigths = np.repeat(1.0, window)/window smas = np.convolve(values, weigths, 'valid') return smas # as a numpy array def ExpMovingAverage(values, window): weights = np.exp(np.linspace(-1., 0., window)) weights /= weights.sum() a = np.convolve(values, weights, mode='full')[:len(values)] a[:window] = a[window] return a def computeMACD(x, slow=26, fast=12): """ compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving avg' return value is emaslow, emafast, macd which are len(x) arrays """ emaslow = ExpMovingAverage(x, slow) emafast = ExpMovingAverage(x, fast) return emaslow, emafast, emafast - emaslow def graphData(stock,MA1,MA2): ''' Use this to dynamically pull a stock: ''' try: print 'Currently Pulling',stock print str(datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S')) #Keep in mind this is close high low open data from Yahoo urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=10y/csv' stockFile =[] try: sourceCode = urllib2.urlopen(urlToVisit).read() splitSource = sourceCode.split('\n') for eachLine in splitSource: splitLine = eachLine.split(',') if len(splitLine)==6: if 'values' not in eachLine: stockFile.append(eachLine) except Exception, e: print str(e), 'failed to organize pulled data.' 
except Exception,e: print str(e), 'failed to pull pricing data' try: date, closep, highp, lowp, openp, volume = np.loadtxt(stockFile,delimiter=',', unpack=True, converters={ 0: mdates.strpdate2num('%Y%m%d')}) x = 0 y = len(date) newAr = [] while x < y: appendLine = date[x],openp[x],closep[x],highp[x],lowp[x],volume[x] newAr.append(appendLine) x+=1 Av1 = movingaverage(closep, MA1) Av2 = movingaverage(closep, MA2) SP = len(date[MA2-1:]) fig = plt.figure(facecolor='#07000d') ax1 = plt.subplot2grid((6,4), (1,0), rowspan=4, colspan=4, axisbg='#07000d') candlestick(ax1, newAr[-SP:], width=.6, colorup='#53c156', colordown='#ff1717') Label1 = str(MA1)+' SMA' Label2 = str(MA2)+' SMA' ax1.plot(date[-SP:],Av1[-SP:],'#e1edf9',label=Label1, linewidth=1.5) ax1.plot(date[-SP:],Av2[-SP:],'#4ee6fd',label=Label2, linewidth=1.5) ax1.grid(True, color='w') ax1.xaxis.set_major_locator(mticker.MaxNLocator(10)) ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d')) ax1.yaxis.label.set_color("w") ax1.spines['bottom'].set_color("#5998ff") ax1.spines['top'].set_color("#5998ff") ax1.spines['left'].set_color("#5998ff") ax1.spines['right'].set_color("#5998ff") ax1.tick_params(axis='y', colors='w') plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper')) ax1.tick_params(axis='x', colors='w') plt.ylabel('Stock price and Volume') maLeg = plt.legend(loc=9, ncol=2, prop={'size':7}, fancybox=True, borderaxespad=0.) maLeg.get_frame().set_alpha(0.4) textEd = pylab.gca().get_legend().get_texts() pylab.setp(textEd[0:5], color = 'w') volumeMin = 0 ax0 = plt.subplot2grid((6,4), (0,0), sharex=ax1, rowspan=1, colspan=4, axisbg='#07000d') rsi = rsiFunc(closep) rsiCol = '#c1f9f7' posCol = '#386d13' negCol = '#8f2020' ax0.plot(date[-SP:], rsi[-SP:], rsiCol, linewidth=1.5) ax0.axhline(70, color=negCol) ax0.axhline(30, color=posCol) ax0.fill_between(date[-SP:], rsi[-SP:], 70, where=(rsi[-SP:]>=70), facecolor=negCol, edgecolor=negCol, alpha=0.5) ax0.fill_between(date[-SP:], rsi[-SP:], 30, where=(rsi[-SP:]<=30), facecolor=posCol, edgecolor=posCol, alpha=0.5) ax0.set_yticks([30,70]) ax0.yaxis.label.set_color("w") ax0.spines['bottom'].set_color("#5998ff") ax0.spines['top'].set_color("#5998ff") ax0.spines['left'].set_color("#5998ff") ax0.spines['right'].set_color("#5998ff") ax0.tick_params(axis='y', colors='w') ax0.tick_params(axis='x', colors='w') plt.ylabel('RSI') ax1v = ax1.twinx() ax1v.fill_between(date[-SP:],volumeMin, volume[-SP:], facecolor='#00ffe8', alpha=.4) ax1v.axes.yaxis.set_ticklabels([]) ax1v.grid(False) ax1v.set_ylim(0, 3*volume.max()) ax1v.spines['bottom'].set_color("#5998ff") ax1v.spines['top'].set_color("#5998ff") ax1v.spines['left'].set_color("#5998ff") ax1v.spines['right'].set_color("#5998ff") ax1v.tick_params(axis='x', colors='w') ax1v.tick_params(axis='y', colors='w') ax2 = plt.subplot2grid((6,4), (5,0), sharex=ax1, rowspan=1, colspan=4, axisbg='#07000d') # START NEW INDICATOR CODE # # END NEW INDICATOR CODE # plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper')) ax2.spines['bottom'].set_color("#5998ff") ax2.spines['top'].set_color("#5998ff") ax2.spines['left'].set_color("#5998ff") ax2.spines['right'].set_color("#5998ff") ax2.tick_params(axis='x', colors='w') ax2.tick_params(axis='y', colors='w') ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5, prune='upper')) for label in ax2.xaxis.get_ticklabels(): label.set_rotation(45) plt.suptitle(stock.upper(),color='w') plt.setp(ax0.get_xticklabels(), visible=False) plt.setp(ax1.get_xticklabels(), visible=False) '''ax1.annotate('Big 
news!',(date[510],Av1[510]), xytext=(0.8, 0.9), textcoords='axes fraction', arrowprops=dict(facecolor='white', shrink=0.05), fontsize=14, color = 'w', horizontalalignment='right', verticalalignment='bottom')''' plt.subplots_adjust(left=.09, bottom=.14, right=.94, top=.95, wspace=.20, hspace=0) plt.show() fig.savefig('example.png',facecolor=fig.get_facecolor()) except Exception,e: print 'main loop',str(e) while True: stock = raw_input('Stock to plot: ') graphData(stock,10,50)
mit
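The charting script above targets Python 2 (print statements, urllib2, raw_input) and imports candlestick from matplotlib.finance, a module that has since been removed from matplotlib (candlestick plotting now lives in the separate mplfinance project). As a minimal illustration of the indicator logic rather than the plotting, the sketch below re-implements the Wilder-smoothed RSI from rsiFunc for Python 3 / NumPy on a synthetic price series; it is not part of the repository above.

# Illustrative Python 3 sketch of the Wilder-smoothed RSI computed by
# rsiFunc above, run on a synthetic random-walk price series.
import numpy as np

def rsi(prices, n=14):
    deltas = np.diff(prices)
    seed = deltas[:n + 1]          # seed window mirrors rsiFunc above
    up = seed[seed >= 0].sum() / n
    down = -seed[seed < 0].sum() / n
    out = np.zeros_like(prices, dtype=float)
    out[:n] = 100. - 100. / (1. + up / down)
    for i in range(n, len(prices)):
        delta = deltas[i - 1]
        upval, downval = (delta, 0.) if delta > 0 else (0., -delta)
        up = (up * (n - 1) + upval) / n      # Wilder smoothing of gains
        down = (down * (n - 1) + downval) / n  # and of losses
        out[i] = 100. - 100. / (1. + up / down)
    return out

prices = 100 + np.cumsum(np.random.default_rng(0).normal(size=250))
print(rsi(prices)[-5:])  # last few RSI values, each in [0, 100]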
caseyclements/blaze
blaze/compute/tests/test_hdfstore.py
14
1791
import pytest
tables = pytest.importorskip('tables')

from blaze.compute.hdfstore import *
from blaze.utils import tmpfile
from blaze import symbol, discover, compute
import pandas as pd
from datetime import datetime
from odo import Chunks, resource, into
import os

try:
    f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
    pytest.skip('skipping test_hdfstore.py %s' % e)
else:
    f.close()
    os.remove('foo')


df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
                   ['ab', 2, 20., datetime(2000, 2, 2)],
                   ['abc', 3, 30., datetime(2000, 3, 3)],
                   ['abcd', 4, 40., datetime(2000, 4, 4)]],
                  columns=['name', 'a', 'b', 'time'])


def test_hdfstore():
    with tmpfile('.hdf5') as fn:
        df.to_hdf(fn, '/appendable', format='table')
        df.to_hdf(fn, '/fixed')

        hdf = resource('hdfstore://%s' % fn)
        s = symbol('s', discover(hdf))

        assert isinstance(compute(s.fixed, hdf),
                          (pd.DataFrame, pd.io.pytables.Fixed))
        assert isinstance(compute(s.appendable, hdf),
                          (pd.io.pytables.AppendableFrameTable, Chunks))

        s = symbol('s', discover(df))
        f = resource('hdfstore://%s::/fixed' % fn)
        a = resource('hdfstore://%s::/appendable' % fn)
        assert isinstance(pre_compute(s, a), Chunks)
        hdf.close()
        f.parent.close()
        a.parent.close()


def test_groups():
    with tmpfile('.hdf5') as fn:
        df.to_hdf(fn, '/data/fixed')

        hdf = resource('hdfstore://%s' % fn)
        assert discover(hdf) == discover({'data': {'fixed': df}})

        s = symbol('s', discover(hdf))
        assert list(compute(s.data.fixed, hdf).a) == [1, 2, 3, 4]
        hdf.close()
bsd-3-clause
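The test above distinguishes the two HDF5 layouts pandas can write: a 'fixed' store and an appendable PyTables 'table' store. The sketch below shows the same distinction with plain pandas (no blaze or odo), assuming the optional PyTables dependency is installed; only the table layout supports where= queries and appending.

# Plain-pandas sketch of the 'fixed' vs 'table' HDF5 layouts exercised above.
# Requires the optional PyTables dependency ('tables').
import os
import tempfile
from datetime import datetime

import pandas as pd

df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
                   ['ab', 2, 20., datetime(2000, 2, 2)],
                   ['abc', 3, 30., datetime(2000, 3, 3)],
                   ['abcd', 4, 40., datetime(2000, 4, 4)]],
                  columns=['name', 'a', 'b', 'time'])

fn = os.path.join(tempfile.mkdtemp(), 'demo.h5')
df.to_hdf(fn, key='fixed')                                        # fixed layout
df.to_hdf(fn, key='appendable', format='table', data_columns=True)  # queryable

print(pd.read_hdf(fn, 'fixed').shape)
print(pd.read_hdf(fn, 'appendable', where='a > 2'))  # where= needs format='table'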
yuginboy/from_GULP_to_FEFF
feff/libs/plot_data.py
1
2856
import sys import os from io import StringIO import numpy as np import matplotlib # Force matplotlib to not use any Xwindows backend. matplotlib.use('Agg') import matplotlib.gridspec as gridspec from matplotlib import pylab import matplotlib.pyplot as plt import scipy as sp from scipy.interpolate import interp1d def plotData(x = np.r_[0:50], y = np.cos(np.r_[0:50]/6*np.pi), error = np.random.rand(50) * 0.5, numOfIter = 1, y_median = np.sin(np.r_[0:50]/6*np.pi), y_max = np.cos(np.r_[0:50]/6*np.pi)+1.5, y_min = np.cos(np.r_[0:50]/6*np.pi)-1.5, out_dir = '/home/yugin/VirtualboxShare/FEFF/out', window_title = 'test', case = '33'): pylab.ion() # Force interactive plt.close('all') ### for 'Qt4Agg' backend maximize figure plt.switch_backend('QT5Agg', ) # plt.switch_backend('QT4Agg', ) fig = plt.figure( ) # gs1 = gridspec.GridSpec(1, 2) # fig.show() # fig.set_tight_layout(True) figManager = plt.get_current_fig_manager() DPI = fig.get_dpi() fig.set_size_inches(1920.0 / DPI, 1080.0 / DPI) gs = gridspec.GridSpec(1,1) ax = fig.add_subplot(gs[0,0]) txt = 'GaMnAs case %s, ' % case + '$\chi(k)$ when the Number of the treated file is: {0}'.format(numOfIter) fig.suptitle(txt, fontsize=22, fontweight='normal') # Change the axes border width for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2) # plt.subplots_adjust(top=0.85) # gs1.tight_layout(fig, rect=[0, 0.03, 1, 0.95]) fig.tight_layout(rect=[0.03, 0.03, 1, 0.95], w_pad=1.1) # put window to the second monitor # figManager.window.setGeometry(1923, 23, 640, 529) figManager.window.setGeometry(1920, 20, 1920, 1180) figManager.window.setWindowTitle(window_title) figManager.window.showMinimized() # plt.show() ax.plot( x, y, label = '<$\chi(k)$>' ) ax.plot( x, y_median, label = '$\chi(k)$ median', color = 'darkcyan') ax.plot( x, y_max, label = '$\chi(k)$ max', color = 'skyblue' ) ax.plot( x, y_min, label = '$\chi(k)$ min', color = 'lightblue' ) fig.tight_layout(rect=[0.03, 0.03, 1, 0.95], w_pad=1.1) ax.plot(x, y, 'k', color='#1B2ACC') ax.fill_between(x, y-error, y+error, alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', linewidth=4, linestyle='dashdot', antialiased=True, label = '$\chi(k)$') ax.grid(True) plt.legend() ax.set_ylabel('$\chi(k)$', fontsize=20, fontweight='bold') ax.set_xlabel('$k$', fontsize=20, fontweight='bold') ax.set_ylim(ymin = -0.3, ymax= 0.5) figManager.window.showMinimized() # plt.draw() # save to the PNG file: out_file_name = '%s_' % (case) + "%05d.png" %(numOfIter) fig.savefig( os.path.join(out_dir, out_file_name) ) if __name__ == "__main__": plotData() print ('plot the data')
gpl-3.0
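plotData above drives a Qt window and draws a +/- error band around the mean curve with fill_between. A minimal headless sketch of the same idea (Agg backend, savefig instead of a GUI window) follows; it is illustrative only and independent of the FEFF pipeline.

# Minimal headless sketch of the error-band plot above: Agg backend,
# fill_between for the +/- error region, savefig instead of a GUI window.
import numpy as np
import matplotlib
matplotlib.use('Agg')            # no X server needed
import matplotlib.pyplot as plt

x = np.r_[0:50]
y = np.cos(x / 6 * np.pi)
err = 0.3 * np.ones_like(y)

fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(x, y, color='#1B2ACC', label=r'$\chi(k)$ mean')
ax.fill_between(x, y - err, y + err, alpha=0.2, facecolor='#089FFF')
ax.set_xlabel(r'$k$')
ax.set_ylabel(r'$\chi(k)$')
ax.legend()
fig.savefig('chi_band.png', dpi=150)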
wail007/ml_playground
linear_regression.py
1
4621
import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.stats import multivariate_normal class _LinearModel(object): def __init__(self): self.w = None def fit(self, x, y): pass def predict(self, x): return np.dot(x, self.w) def cost(self, x, y): pass def precision(self, x, y): p = self.predict(x) return (1.0 / len(p)) * np.sum(p == y) class LeastSquareRegression(_LinearModel): def __init__(self): super(LeastSquareRegression, self).__init__() def fit(self, x, y): xt = x.transpose() self.w = np.linalg.pinv(np.dot(xt, x)).dot(xt).dot(y) def cost(self, x, y): """ Residual Sum of Squares """ r = y - np.dot(x, self.w) rt= np.transpose(r) return (1.0 / len(x)) * np.trace(np.dot(rt, r)) class RidgeRegression(LeastSquareRegression): def __init__(self, incr=0.1, min_change=0.001): super(RidgeRegression, self).__init__() self.incr = incr self.min_change = min_change def fit(self, x, y): xtrain, xval = np.split(x, [int(0.7*len(x))]) ytrain, yval = np.split(y, [int(0.7*len(y))]) alpha = 0.0 best_alpha = 0.0 best_cost = float("inf") old_cost = float("inf") new_cost = float("inf") while True: self._fit(xtrain, ytrain, alpha) new_cost = self.cost(xval, yval) if new_cost < best_cost: best_cost = new_cost best_alpha = alpha #print("cost: %f, alpha: %f" % (best_cost, best_alpha)) if abs(new_cost - old_cost) < self.min_change: break old_cost = new_cost alpha += self.incr self._fit(xtrain, ytrain, best_alpha) def _fit(self, x, y, alpha): x = x[:,1:] xt = np.transpose(x) self.w = np.linalg.pinv(np.dot(xt, x) + alpha * np.eye(x.shape[1])).dot(xt).dot(y) bias = np.mean(y, axis=0, keepdims=True) - np.dot(np.mean(x, axis=0, keepdims=True), self.w) self.w = np.vstack([bias, self.w]) class LeastSquareClassification(LeastSquareRegression): def __init__(self): super(LeastSquareClassification, self).__init__() def predict(self, x): return super(LeastSquareClassification, self).predict(x).argmax(axis=1) class RidgeClassification(RidgeRegression): def __init__(self, incr=0.1, min_change=0.001): super(RidgeClassification, self).__init__(incr, min_change) def predict(self, x): return super(RidgeClassification, self).predict(x).argmax(axis=1) class LDAClassification(_LinearModel): def __init__(self): self.w = None self.priors = None self.means = [] self.covs = [] def fit(self, x, y): k = y.shape[1] y_arg = np.argmax(y, axis=1) class_count = np.sum (y, axis=0, keepdims=True) self.priors = (1.0 / len(y)) * np.sum (y, axis=0, keepdims=True) self.w = self._lda(x, y) x_proj = np.dot(x, self.w) means = (1.0 / class_count.T) * np.dot(y.T, x_proj) for i in xrange(k): xk_proj = x_proj[y_arg==i] self.means.append(np.mean(xk_proj, axis = 0)) self.covs .append(np.cov (xk_proj, rowvar=False)) def predict(self, x): k = self.w.shape[1] x_proj = np.dot(x, self.w) likelihood = np.column_stack([multivariate_normal.pdf(x_proj, self.means[i], self.covs[i]) for i in xrange(k)]) posterior = (likelihood * self.priors) posterior = posterior / np.sum(posterior, axis=1, keepdims=True) return np.argmax(posterior, axis=1) def _lda(self, x, y): k = y.shape[1] y_arg = np.argmax(y, axis=1) class_count= np.sum (y, axis=0, keepdims=True) total_mean = np.mean(x, axis=0, keepdims=True) class_mean = (1.0 / class_count.T) * np.dot(y.T, x) mk_m = class_mean - total_mean b_cov = np.dot(class_count * mk_m.T, mk_m) w_cov = np.zeros(b_cov.shape) for i in xrange(k): xk = x[y_arg == i] xk_mk = xk - class_mean[i] w_cov += np.dot(xk_mk.T, xk_mk) eig_vals, eig_vecs = np.linalg.eig(np.dot(np.linalg.pinv(w_cov), b_cov)) eig_vals = np.abs(eig_vals) 
        eig_args = np.argsort(eig_vals)[::-1][:k]
        return eig_vecs[:, eig_args]
apache-2.0
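RidgeRegression._fit above solves the penalized normal equations in closed form, w = (X^T X + alpha*I)^-1 X^T y, and tunes alpha on a held-out split. The standalone sketch below (synthetic data, not part of the repository) shows that solve and how larger alpha shrinks the weights.

# Standalone sketch of the closed-form ridge solve used in RidgeRegression._fit:
# w = (X^T X + alpha*I)^{-1} X^T y, with larger alpha shrinking the weights.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
true_w = np.array([1.5, -2.0, 0.0, 0.5, 3.0])
y = X @ true_w + rng.normal(scale=0.1, size=200)

def ridge_weights(X, y, alpha):
    xt = X.T
    return np.linalg.pinv(xt @ X + alpha * np.eye(X.shape[1])) @ xt @ y

for alpha in (0.0, 1.0, 100.0):
    w = ridge_weights(X, y, alpha)
    print(alpha, np.round(w, 3), round(float(np.linalg.norm(w)), 3))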
jeremymcrae/denovoFilter
denovoFilter/screen_candidates.py
1
3222
""" Copyright (c) 2016 Genome Research Ltd. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from denovoFilter.load_candidates import load_candidates from denovoFilter.preliminary_filtering import preliminary_filtering from denovoFilter.exclude_segdups import check_segdups from denovoFilter.missing_symbols import fix_missing_gene_symbols from denovoFilter.standardise import standardise_columns def screen_candidates(de_novos_path, fails_path, filter_function, maf=0.01, fix_symbols=True, annotate_only=False, build='grch37'): """ load and optionally filter candidate de novo mutations. Args: de_novos_path: path to table of unfiltered canddiate DNMs fails_path: path to file listing samples which failed QC, and therefore all of their candidates need to be excluded. filter_function: function for filtering the candidates, either filter_denovogear_sites(), or filter_missing_indels(). maf: MAF threshold for filtering. This is 0.01 for denovogear sites, and 0 for the missing indels. fix_symbols: whether to annotate HGNC symbols for candidates missing these. annotate_only: whether to include a column indicating pass status, rather than excluding all candidates which fail the filtering. build: whether to use the 'grch37' or 'grch38' build to get missing symbols. Returns: pandas DataFrame of candidate de novo mutations. """ if de_novos_path is None: return None # load the datasets de_novos = load_candidates(de_novos_path) sample_fails = [] if fails_path is not None: sample_fails = [ x.strip() for x in open(fails_path) ] # run some initial screening status = preliminary_filtering(de_novos, sample_fails, maf_cutoff=maf) segdup = check_segdups(de_novos) if fix_symbols: de_novos['symbol'] = fix_missing_gene_symbols(de_novos, build) pass_status = filter_function(de_novos, status & segdup) & status & segdup if annotate_only: de_novos['pass'] = pass_status else: de_novos = de_novos[pass_status] return standardise_columns(de_novos)
mit
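screen_candidates() above combines several boolean Series and then either drops failing rows or, with annotate_only=True, keeps every row and records the outcome in a 'pass' column. The toy sketch below illustrates just that pattern; the column names and masks are invented for the example and are not the real denovoFilter inputs.

# Toy sketch of the pass/annotate pattern in screen_candidates(): combine
# boolean masks, then either subset the table or add a 'pass' column.
# The columns and thresholds here are made up for illustration.
import pandas as pd

de_novos = pd.DataFrame({'person_id': ['a', 'b', 'c', 'd'],
                         'max_af': [0.0, 0.02, 0.001, 0.0],
                         'in_segdup': [False, False, True, False]})
sample_fails = ['b']

status = ~de_novos['person_id'].isin(sample_fails) & (de_novos['max_af'] < 0.01)
segdup = ~de_novos['in_segdup']
pass_status = status & segdup

annotate_only = True
if annotate_only:
    de_novos['pass'] = pass_status
else:
    de_novos = de_novos[pass_status]

print(de_novos)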
awsteiner/o2sclpy
o2sclpy/doc_data.py
1
39617
from o2sclpy.utils import terminal ter=terminal() version='0.926.a1' cmaps=[('Perceptually Uniform Sequential', ['viridis','plasma','inferno','magma']), ('Sequential', ['Greys','Purples','Blues','Greens','Oranges','Reds', 'YlOrBr','YlOrRd','OrRd','PuRd','RdPu','BuPu', 'GnBu','PuBu','YlGnBu','PuBuGn','BuGn','YlGn']), ('Sequential (2)', ['binary','gist_yarg','gist_gray','gray','bone','pink', 'spring','summer','autumn','winter','cool','Wistia', 'hot','afmhot','gist_heat','copper']), ('Diverging', ['PiYG','PRGn','BrBG','PuOr','RdGy','RdBu', 'RdYlBu','RdYlGn','Spectral','coolwarm','bwr','seismic']), ('Qualitative', ['Pastel1','Pastel2','Paired','Accent', 'Dark2','Set1','Set2','Set3', 'tab10','tab20','tab20b','tab20c']), ('Miscellaneous', ['flag','prism','ocean','gist_earth','terrain','gist_stern', 'gnuplot','gnuplot2','CMRmap','cubehelix','brg','hsv', 'gist_rainbow','rainbow','jet','nipy_spectral','gist_ncar'])] """ List of cmaps for 'help cmaps' """ new_cmaps=[('O2sclpy cmaps', ['jet2','pastel2','reds2','greens2','blues2'])] """ List of new o2sclpy cmaps """ base_list=[ ["addcbar","Add a color bar.", "<left> <bottom> <width> <height> [kwargs]", "Add a color bar from the most recently created image. The "+ "axes object for the first colorbar added is named "], ["arrow","Plot an arrow.", "<x1> <y1> <x2> <y2> <arrow properties> [kwargs]", "Plot an arrow from (x1,y1) to (x2,y2). This command uses "+ "axes.annotate() to generate an arrow with an empty string "+ "as the first argument to annotate(). The o2graph argument <arrow "+ "properties> is the python dictionary for the 'arrowprops' "+ "argument to annotate(). The arrowstyle and connectionstyle "+ "attributes should be listed along with other arrowprops attributes.\n"+ " \nExamples for arrowprops are:\n"+ "\"arrowstyle=->,connectionstyle=arc3\"\n"+ "\"arrowstyle=-|>,connectionstyle=arc,fc=red,ec=blue\"\n"+ "\"arrowstyle=-|>,connectionstyle=arc,head_length=4.0,"+ "head_width=1.0\"\n"+ "\"arrowstyle=->,connectionstyle=arc3,head_length=4.0,"+ "head_width=1.0,rad=-0.1\"\n"+ "\"arrowstyle=fancy,connectionstyle=arc3,head_length=4.0,"+ "head_width=1.0,rad=-0.1\"\n \n"+ "Summary for arrowstyle argument (angleB is renamed to as_angleB):\n"+ "Name Attributes\n"+ "- None\n"+ "-> head_length=0.4,head_width=0.2\n"+ "-[ widthB=1.0,lengthB=0.2,as_angleB=None\n"+ "|- widthA=1.0,widthB=1.0\n"+ "-| head_length=0.4,head_width=0.2\n"+ "<- head_length=0.4,head_width=0.2\n"+ "<- head_length=0.4,head_width=0.2\n"+ "<| head_length=0.4,head_width=0.2\n"+ "<| head_length=0.4,head_width=0.2\n"+ "fancy head_length=0.4,head_width=0.4,tail_width=0.4\n"+ "simple head_length=0.5,head_width=0.5,tail_width=0.2\n"+ "wedge tail_width=0.3,shrink_factor=0.5\n \n"+ "(note that fancy, simple or wedge require arc3 or angle3 connection "+ "styles)\n \n"+ "Summary for connectionstyle argument (angleB is renamed to "+ "cs_angleB):\n"+ "Name Attributes\n"+ "angle angleA=90,cs_angleB=0,rad=0.0\n"+ "angle3 angleA=90,cs_angleB=0\n"+ "arc angleA=0,cs_angleB=0,armA=None,armB=None,rad=0.0\n"+ "arc3 rad=0.0\n"+ "bar armA=0.0,armB=0.0,fraction=0.3,angle=None\n \n"+ "See https://matplotlib.org/2.0.2/users/annotations.html for more."], ["backend","Select the matplotlib backend to use.","<backend>", "This selects the matplotlib backend. "+ "Typical values are 'Agg', 'TkAgg', 'WX', 'QTAgg', 'QT4Agg'. "+ "Use -backend Agg to save the plot to a file without "+ "opening a window. The backend can only be changed once, i.e. 
"+ "if the "+ter.cyan_fg()+ter.bold()+"backend"+ter.default_fg()+ " command is invoked "+ "more than once, then only the last invocation will have any "+ "effect."], ["canvas","Create a plotting canvas.","", "Create an empty plotting canvas. For example 'o2graph "+ "-canvas -show'. Typically, 'o2graph' creates "+ "the canvas automatically so explicitly using this command "+ "is unnecessary."], ["clf","Clear the current figure.","", "Clear the current figure."], ["ellipse","Plot an ellipse.", "<x> <y> <w> <h> [angle] [kwargs]", ("Plot an ellipse centered at (x,y) with width w and height h, "+ "optionally rotated by the specified angle. By default, the "+ "ellipse has no border, "+ "but the linewidth ('lw') and edgecolor kwargs can be used to "+ "specify one if desired. Some useful kwargs are alpha, color, "+ "edgecolor (ec), facecolor (fc), fill, hatch, linestyle (ls), "+ "linewidth (lw).")], ["eval","Run the python eval() function.","<python code>", "Take the python code given and execute it using eval(). "+ "For example, 'o2graph -eval \"print(numpy.pi)\"'."], ["exec","Run the python code specified in file","<filename>", "Take the python code given and execute it using execfile(). "+ "For example, 'o2graph -eval \"print(numpy.pi)\"'."], ["image","Plot an image.","<file>", "Read a .png file, create a plot, and then call plot.show()."], ["inset","Add a new set of axes (e.g. for an inset).", "<left> <bottom> <width> <height> [kwargs]", "This command creates a new set of axes, adds the new axies "+ "to the list of axes, and sets the new axes as the current. "+ "The axes object is named 'inset0' for the first inset, then "+ "'inset1', and so on."], ["line","Plot a line.","<x1> <y1> <x2> <y2> [kwargs]", "Plot a line from (x1,y1) to (xy,y2). Some useful "+ "kwargs are color (c), dashes, linestyle (ls), linewidth (lw), "+ "marker, markeredgecolor (mec), markeredgewidth (mew), "+ "markerfacecolor (mfc), markerfacecoloralt (mfcalt), markersize "+ "(ms). 
For example: o2graph -line 0.05 0.05 0.95 0.95 "+ "lw=0,marker='+' -show"], ["modax","Modify current axes properties.","[kwargs]", ("kwarg Values Description\n"+ "------------------------------------------------------------------------\n"+ "alpha float>0 alpha value for region inside axes\n"+ "labelsize float>0 font size for labels\n"+ "x_loc b,t,tb placement of x-axis (bottom, top, or both)\n"+ "x_major_loc float>0 linear increment for x-axis major ticks\n"+ "x_minor_loc float>0 linear increment for x-axis minor ticks\n"+ "x_minor_tick_dir in,out,inout direction of x-axis minor ticks\n"+ "x_minor_tick_len float>0 length of x-axis minor ticks\n"+ "x_minor_tick_wid float>0 width of x-axis minor ticks\n"+ "x_tick_dir in,out,inout direction of x-axis major ticks\n"+ "x_tick_len float>0 length of x-axis major ticks\n"+ "x_tick_wid float>0 width of x-axis major ticks\n"+ "x_visible T/F set x-axis visible or invisible\n"+ "y_loc l,r,lr placement of y-axis (left, right, or both)\n"+ "y_major_loc float>0 linear increment for x-axis major ticks\n"+ "y_minor_loc float>0 linear increment for x-axis minor ticks\n"+ "y_minor_tick_dir in,out,inout direction of y-axis minor ticks\n"+ "y_minor_tick_len float>0 length of y-axis minor ticks\n"+ "y_minor_tick_wid float>0 width of y-axis minor ticks\n"+ "y_tick_dir in,out,inout direction of y-axis major ticks\n"+ "y_tick_len float>0 length of y-axis major ticks\n"+ "y_tick_wid float>0 width of y-axis major ticks\n"+ "y_visible T/F set y-axis visible or invisible\n")], ["o2scl-addl-libs","Specify a list of additional libraries to load.", "<dir>",""], ["o2scl-cpp-lib","Specify the location of the standard C++ library.", "<dir>",""], ["o2scl-lib-dir","Specify the directory where libo2scl.so is.", "<dir>",""], ["plotv","Plot several vector-like data sets.", "[multiple vector spec. for x] <multiple vector spec. for y>", "The "+ter.cyan_fg()+ter.bold()+"plotv"+ter.default_fg()+ " command plots one or several pairs of vectors for x "+ "and y. The total number of curves plotted will be the number "+ "of vector data sets from the first argument times the number "+ "of vector data sets from the second argument. If the x and y "+ "vector lengths are not equal, then the longer vector is "+ "truncated. Any kwargs are applied to all curves plotted. For "+ "details on multiple vector specifications, use "+ "'o2graph -help "+ter.green_fg()+ter.bold()+"mult-vector-spec"+ ter.default_fg()+"'. 
Note that "+ter.cyan_fg()+ter.bold()+"plotv"+ ter.default_fg()+" uses "+ "the vector<contour_line> object as temporary storage, so if "+ "the current object has type vector<contour_line> then you "+ "will need to save that object to a file and use "+ter.cyan_fg()+ ter.bold()+"clear"+ter.default_fg()+" first."], ["point","Plot a single point.","<x> <y>",""], ["error-point","Plot a single point with errorbars.", "<x> <y> [<x err> <yerr>] or [<x lo> <x hi> <y lo> <y hi>]", "Some "+ "useful kwargs "+ "for the error-point command are:\n\n"+ "keyword description default value\n"+ "---------------------------------------------------------\n"+ "ecolor error bar color None\n"+ "capsize cap size in points None\n"+ "barsabove plot error bars on top of point False\n"+ "lolims y value is lower limit False\n"+ "uplims y value is upper limit False\n"+ "xlolims x value is lower limit False\n"+ "xuplims x value is upper limit False\n"+ "errorevery draw error bars on subset of data 1\n"+ "capthick thickness of error bar cap None\n\n"+ "See also "+ter.cyan_fg()+ter.bold()+"errorbar"+ter.default_fg()+ " for for plotting columns from a "+ter.magenta_fg()+ter.bold()+ "table "+ter.default_fg()+"object."], ["python","Begin an interactive python session.","",""], ["rect","Plot a rectangle.", "<x1> <y1> <x2> <y2> [angle] [kwargs]", "Plot a rectange from (x1,y1) to (xy,y2) with "+ "rotation angle <angle>. By default, the rectangle has no border, "+ "but the linewidth ('lw') and edgecolor kwargs can be used to "+ "specify one if desired. Some useful kwargs are alpha, color, "+ "edgecolor (ec), facecolor (fc), fill, hatch, linestyle (ls), "+ "linewidth (lw)."], ["save","Save the current plot in a file.","<filename>", "Save the current plot in a file similar "+ "to plot.savefig(). The action of this command depends on "+ "which backend was selected. File type depends on the "+ "extension, typically either .png, .pdf, .eps, .jpg, .raw, .svg, "+ "and .tif ."], ["selax","Select axis.","[name]", "Select which axis to use for subsequent plotting commands. "+ "If [name] is not specified, then the names of all current "+ "axes objects are listed."], ["show","Show the current plot.","","Show the current plot "+ "on the screen and begin "+ "the graphical user interface. This is similar to plot.show()."], ["subadj","Adjust spacing of subplots.","<kwargs>", "Adjust the spacing for subplots after using the 'subplots' "+ "command. All arguments are keyword arguments. The kwargs for "+ "'subadj' are left, right, bottom, top, "+ "wspace, and hspace. This just a wrapper to the "+ "pyplot.subplots_adjust() function."], ["subplots","Create subplots.","<nrows> <ncols> [kwargs]", "Create a grid of <nrows> by <ncols> subplots. "+ "The kwargs currently supported are 'sharex=True|False', "+ "and 'sharey=True|False'. Subplots are named 'subplot0', 'subplot1', "+ "... with the indexes moving to the right before proceeding to "+ "the next row."], ["text","Plot text in the data coordinates.", "<x> <y> <text> [kwargs]","The 'text' command plots text in the "+ "data coordinates defined by the current axes with the font size "+ "determined by the value of the parameter 'font'. LaTeX is used "+ "for text rendering by default, but this setting can be changed "+ "using, e.g. '-set usetex 0'. Some useful kwargs are fontfamily, "+ "fontstyle, fontsize, color, backgroundcolor, rotation, "+ "horizontalalignment (ha), and verticalalignment (va). 
Note that "+ "you must disable LaTeX rendering to change fontfamily or "+ "fontstyle."], ["textbox", "Plot a box with text.","<x1> <y1> <text> [bbox properties] [kwargs]", "Plot text <text> and a box at location <x1> <y1>. For example, "+ "textbox 0.5 0.5 \"$ f(x) $\" \"alpha=0.8,facecolor=white\" . "+ "This command uses the standard axis text function, but adds "+ "a bounding box with the specified properties. Typical bbox "+ "properties are boxstyle (Circle, DArrow, LArrow, RArrow, Round, "+ "Round4, Roundtooth, Sawtooth, Square), alpha, color, edgecolor (ec), "+ "facecolor (fc), fill, hatch ({'/','\','|','-','+','x','o','O',"+ "'.', '*'}), linestyle (ls), and linewidth (lw). The keyword "+ "arguments are for the text properties, and follow "+ "those of the "+ter.cyan_fg()+ter.bold()+"text"+ter.default_fg()+ " command."], ["ttext","Plot text in window coordinates [(0,0) to (1,1)].", "<x> <y> <text> [kwargs]","The "+ter.cyan_fg()+ter.bold()+"ttext"+ ter.default_fg()+" command plots text in the "+ "window coordinates [typically (0,0) to (1,1)] with the font size "+ "determined by the value of the parameter "+ter.red_fg()+ ter.bold()+"font"+ter.default_fg()+" LaTeX is used "+ "for text rendering by default, but this setting can be changed "+ "using, e.g. '-set usetex 0'. Some useful kwargs are fontfamily, "+ "fontstyle, fontsize, color, backgroundcolor, rotation, "+ "horizontalalignment (ha), and verticalalignment (va). Specifying "+ "fontsize overrides the "+ter.red_fg()+ter.bold()+"font"+ ter.default_fg()+" parameter Note that "+ "you must disable LaTeX rendering to change fontfamily or "+ "fontstyle."], ["xlimits","Set the x-axis limits.","<low> <high>", "The "+ter.cyan_fg()+ter.bold()+"xlimits"+ ter.default_fg()+" command sets "+ter.red_fg()+ ter.bold()+"xlo"+ter.default_fg()+" and "+ter.red_fg()+ ter.bold()+"xhi"+ter.default_fg()+" to the specified limits, "+ "and sets "+ter.red_fg()+ ter.bold()+"xset"+ter.default_fg()+" to true. If a plotting "+ "canvas is currently "+ "open, then "+ "the x-limits on the current axis are modified. Future plots are also "+ "plot with the specified x-limits. If <low> and <high> are identical "+ "then "+ter.red_fg()+ ter.bold()+"xset"+ter.default_fg()+" is set to false and the x "+ "limits are automatically set by matplotlib."], ["xtitle","Add x title to plot (or subplot).","",""], ["ylimits","Set the y-axis limits.","<low> <high>", "Set 'ylo' and 'yhi' to the specified limits, "+ "and set 'yset' to true. If a plotting canvas is currently "+ "open, then "+ "the y-limits on the current axis are modified. Future plots are also "+ "set with the specified y-limits. If <low> and <high> are identical "+ "then "+ter.red_fg()+ ter.bold()+"yset"+ter.default_fg()+" is set to false and the y "+ "limits are automatically set by matplotlib."], ["ytitle","Add y title to plot (or subplot).","",""], ["yt-ann","Annotate a yt rendering (experimental).","", "The 'yt-ann' command adds a list of o2graph commands that can "+ "be used to annotate a yt rendering. Annotations are normal "+ "o2graph 2D plotting commands built upon a coordinate system with "+ "(0,0) as the lower-left corner of the image and (1,1) as the "+ "upper-right corner. "+ "yt-ann command arguments may include dashes but must end with the "+ "word 'end'.\n\n"+ "Examples are:\n -yt-ann -text 0.1 0.95 \"Ann. example\" "+ "color=w,ha=left end"], ["yt-arrow","Draw an arrow in the yt volume.", "<[x1,y1,z1]> <[x2,y2,z2]> [kwargs]", "Draw an arrow from the tail at (x1,y1,z1) to the head at "+ "(x2,y2,z2). 
Relevant kwargs are "+ "color=[r,g,b,a] where r,g,b,a are all from 0 to 1 and "+ "keyname='o2sclpy_line' for the key name in the list yt sources, "+ "n_lines=40 for the number of lines around the azimuthal angle, "+ "frac_length=0.05 for the fractional length of the head relative "+ "to the full arrow length, radius=0.0125 for the radius of the "+ "largest part of the arrow head, coords=user to use either the "+ "internal or user-based coordinate system. "+ "If the x, y, and z limits have not yet been set, then the "+ "lower limit for the x coordinate will be set by the minimum "+ "of x1 and x2, and the upper limit for the x coordinate will be "+ "set to the maximum of x1 and x2. Similarly for y and z. If "+ "a yt volume has not yet been constructed, then the default "+ "volume will be created."], ["yt-axis","Add an axis to the yt volume.", "[x] [y] [z] [kwargs]", "Plot an axis using a point at the origin and then "+ "three arrows pointing to "+ "[0,0,xval], [0,yval,0], and [0,0,zval]. "+ "Relevant kwargs are color=[1,1,1,0.5], and coords='user' "+ "coords=user to use the "+ "user-based coordinate system or 'internal' to use the internal "+ "coordinates"], ["yt-box","Draw a box in the yt volume.", "<[x1,y1,z1]> <[x2,y2,z2]> [kwargs]", "Draw a box with diagonally opposed corners "+ "(x1,y1,z1) to (x2,y2,z2). Relevant kwargs are "+ "color=[r,g,b,a] where r,g,b,a are all from 0 to 1, "+ "coords=user to use the "+ "user-based coordinate system or 'internal' to use the internal "+ "coordinates, and "+ "keyname='o2sclpy_line' for the key name in the list yt sources. "+ "If the x, y, and z limits have not yet been set, then the "+ "lower limit for the x coordinate will be set by the minimum "+ "of x1 and x2, and the upper limit for the x coordinate will be "+ "set to the maximum of x1 and x2. Similarly for y and z. If "+ "a yt volume has not yet been constructed, then the default "+ "volume will be created."], ["yt-line","Draw a line in the yt volume.", "<x1> <y1> <z1> <x2> <y2> <z2> [kwargs]", "Draw a line from (x1,y1,z1) to (x2,y2,z2). Relevant kwargs are "+ "color=[r,g,b,a] where r,g,b,a are all from 0 to 1, "+ "coords=user to use the "+ "user-based coordinate system or 'internal' to use the internal "+ "coordinates. and "+ "keyname='o2sclpy_line' for the key name in the list yt sources. "+ "If the x, y, and z limits have not yet been set, then the "+ "lower limit for the x coordinate will be set by the minimum "+ "of x1 and x2, and the upper limit for the x coordinate will be "+ "set to the maximum of x1 and x2. Similarly for y and z. If "+ "a yt volume has not yet been constructed, then the default "+ "volume will be created."], ["yt-path","Add a path to the yt animation.", "<type> <number of frames> <other parameters>", "This adds a path to the yt animation. To rotate the camera around "+ "the z-axis, use 'yaw' <n_frames> "+ "<angle>, where angle is a fraction of a full rotation. To zoom "+ "the camera, use 'zoom' "+ "<n_frames> <factor> ,where factor is the total zoom factor to "+ "apply over all n_frames. To move the camera along a line, "+ "use 'move' <n_frames> "+ "<[dest_x,dest_y,dest_z]> <'internal' or 'user'>, where the third "+ "argument is the destination in either the internal or user-specified "+ "coordinate system. To turn the camera without moving it, use "+ "'turn' <n_frames> <[foc_x,foc_y,foc_z]> <'internal' or 'user'>. 
"+ "Executing 'yt-path reset' resets the yt "+ "animation path to an empty list (for no animation)."], ["yt-render","Render the yt volume visualization.", "<filename or pattern> [movie output filename]", "Perform the volume rendering. If yt_path is empty, then "+ "the first argument is the filename. If yt_path is not empty "+ "then the first argument is a filename pattern containing * "+ "where each frame will be stored. If yt_path is not empty "+ "and a movie filename is given, then ffmpeg will be used "+ "to combine the frames into an mp4 file."], ["yt-source-list","List all current yt sources.","", "For each source output the index, keyname, and source type."], ["yt-text","Add text to the yt volume.", "<x> <y> <z> <text>","reorient=False"], ["yt-tf","Edit the yt transfer function.","<mode> <args>", "To create a new transfer function, use 'new' for <mode> "+ "and the remaining <args> are <min> <max> [nbins] "+ ".To add a Gaussian, use 'gauss' for <mode> "+ "and <args> are <loc> <width> <red> <green> <blue>, and <alpha>. "+ "To add a step function, use 'step' "+ "<low> <high> <red> <green> <blue>, and <alpha> "+ "To plot the transfer function, use 'plot' "+ "<filename>."], ["zlimits","Set the z-axis limits.","<low> <high>", "Set 'zlo' and 'zhi' to the specified limits, "+ "and set 'zset' to true. The z-axis limits are principally used "+ "for yt volume visualizations. If <low> and <high> are identical "+ "then "+ter.red_fg()+ ter.bold()+"zset"+ter.default_fg()+" is set to false."] ] """ This is a list of 4-element entries: 1: command name 2: short description 3: argument list 4: full help text """ extra_types=["table","table3d","hist_2d","hist","double[]","int[]", "size_t[]","tensor","tensor<int>","tensor<size_t>", "tensor_grid"] """ List of types which have additional plotting commands """ extra_list=[ ["table","plot", "Plot two columns from the table.", "<x> <y> [kwargs]", "If the current object is a table, then plot "+ "column <y> versus column "+ "<x>. If the current object is a one-dimensional histogram, then plot "+ "the histogram weights as a function of the bin representative values. "+ "If the current object is a set of contour lines, then plot the full "+ "set of contour lines. Some useful kwargs are color (c), dashes, "+ "linestyle (ls), linewidth (lw), marker, markeredgecolor (mec), "+ "markeredgewidth (mew), markerfacecolor (mfc), markerfacecoloralt "+ "(mfcalt), markersize (ms). For example: o2graph -create x 0 10 0.2 "+ "-function sin(x) y -plot x y lw=0,marker='+' -show"], ["table","plot-color", "Plot three columns from the table.", "<x> <y> <z> <cmap> [kwargs]", "If the current object is a table, then plot "+ "column <y> versus column "+ "<x> using line segments colored by column <z> which are rescaled "+ "to colormap <cmap>. "+ "Some useful kwargs are color (c), dashes, "+ "linestyle (ls), linewidth (lw), marker, markeredgecolor (mec), "+ "markeredgewidth (mew), markerfacecolor (mfc), markerfacecoloralt "+ "(mfcalt), markersize (ms). For example: o2graph -create x 0 10 0.2 "+ "-function sin(x) y -function cos(x) z -plot-color x y z "+ "Purples lw=0,marker='+' -show"], ["table","rplot", "Plot a region inside a column or in between two columns.", "<x1> <y1> [x2 y2] [kwargs]", "If either 2 or 3 arguments are specified, "+ "this command plots the "+ "region inside the curve defined by the specified set of x and y "+ "values. The first point is copied at the end to ensure a closed "+ "region. 
If 4 or 5 arguments are specified, then this command plots the "+ "region in between two sets of x and y values, again adding the first "+ "point from (x1,y1) to the end to ensure a closed region."], ["table","scatter","Create a scatter plot from 2-4 columns.", "<x> <y> [s] [c] [kwargs]", "This command creates a scatter plot form "+ "columns <x> and <y>, "+ "optionally using column [s] to choose the marker size and optionally "+ "using column [c] to choose the marker color. To vary the marker colors "+ "while choosing the default marker size just specify 'None' as the "+ "argument for [s]. Or, to specify keyword arguments while using the "+ "default size and color, specify 'None' as the argument for both [s] "+ "and [c]."], ["table","errorbar", "Plot the specified columns with errobars.", "<x> <y> <xerr> <yerr> [kwargs]", "Plot column <y> versus column <x> with "+ "symmetric error bars given in "+ "column <xerr> and <yerr>. For no uncertainty in either the x or y "+ "direction, just use 0 for <xerr> or <yerr>, respectively. Some "+ "useful kwargs "+ "for the errorbar command are:\n\n"+ "keyword description default value\n"+ "---------------------------------------------------------\n"+ "ecolor error bar color None\n"+ "elinewidth error bar line width None\n"+ "capsize cap size in points None\n"+ "barsabove plot error bars on top of points False\n"+ "lolims y value is lower limit False\n"+ "uplims y value is upper limit False\n"+ "xlolims x value is lower limit False\n"+ "xuplims x value is upper limit False\n"+ "errorevery draw error bars on subset of data 1\n"+ "capthick thickness of error bar cap None\n\n"+ "For error points with no lines use, e.g. lw=0,elinewidth=1 . "+ "See also 'error-point' for plotting a single point with errorbars."], ["table","yt-scatter","Add scattered points to a yt scene", ("<x column> <y column> <z column> [size column] [red column] "+ "[green column] [blue column] [alpha column]"), ("This adds a series of points to a yt scene. If a volume "+ "has not yet been added, then a default volume is added. "+ "If the x, y-, or z-axis limits have not yet been set, then "+ "they are set by the limits of the data. If the size column "+ "is unspecified, 'none', or 'None', then the default value of 3 is "+ "used. If the color columns are unspecified, 'none' or "+ "'None', then [1,1,1] is used, and finally the default "+ "for the alpha column is 0.5. If any of the values for the color "+ "columns are less than zero or greater than 1, then that color "+ "column is rescaled to [0,1].")], ["table","yt-vertex-list", "Draw a line from a series of vertices in a table.", "<x column> <y column> <z column> [kwargs]", "Create a series of yt LineSource objects in a visualization "+ "using the three specified columns as vertices. One line segment "+ "will be drawn from the values in the first row to the values in "+ "the second row, one line segment from the second row to the "+ "third row, and so on"], ["table","plot1","Plot the specified column.","<y> [kwargs]", "Plot column <y> versus row number. Some "+ "useful kwargs are color (c), "+ "dashes, linestyle (ls), linewidth (lw), marker, markeredgecolor (mec), "+ "markeredgewidth (mew), markerfacecolor (mfc), markerfacecoloralt "+ "(mfcalt), markersize (ms). For example: o2 -create x 0 10 0.2 "+ "-function sin(x) y -plot1 y ls='--',marker='o' -show"], ["table","hist-plot","Create a histogram plot from column in a table.", "<col>","For a table, create a histogram plot from the "+ "specified column. 
This command uses matplotlib to construct the "+ "histogram rather than using O2scl to create a hist_2d object."], ["table","hist2d-plot", "Create a 2-D histogram plot from two columns in a table.", "<col x> <col y>","For a table, create a 2D histogram plot from "+ "the specified columns. This command uses matplotlib to construct the "+ "histogram rather than using O2scl to create a hist object."], ["table3d","den-plot","Create a density plot from a table3d object.", "<slice>", "Creates a density plot from the specified "+ "slice. A z-axis density legend "+ "is print on the RHS if colbar is set to True before plotting. "+ "If z-axis limits are specified, then values larger than the upper "+ "limit "+ "are set equal to the upper limit and values smaller than the lower "+ "limit are set equal to the lower limit before plotting. The x- "+ "and y-axis limits (xlo,xhi,ylo,yhi) are ignored. The python "+ "function imshow() is used, unless 'pcm=True' is specified, in "+ "which case the pcolormesh() function is used instead. When "+ "'pcm=False', logarithmic scales are handled by "+ "taking the base 10 log of the x- or y-grids "+ "specified in the table3d object before plotting. When 'pcm=True', " "logarithmic axes can be handled automatically. The imshow() "+ "function presumes a uniform linear or logarithmic x- and y-axis "+ "grid, and the den-plot function will output a warning if this "+ "is not the case. The pcolormesh() function can handle arbitrary "+ "x and y-axis grids. Some useful kwargs are cmap, interpolation "+ "(for imshow), alpha, vmin, and vmax."], ["table3d","den-plot-rgb","Create a (R,G,B) density plot from a table3d.", "<slice_r> <slice_g> <slice_b>","Create a density plot from "+ "the three specified slices. This command uses imshow()."], ["hist","plot","Plot the histogram.","[kwargs]", "Plot the histogram weights as a function "+ "of the bin representative values. "+ "Some useful kwargs (which apply for all three object types) are "+ "color (c), dashes, linestyle (ls), linewidth (lw), marker, "+ "markeredgecolor (mec), markeredgewidth (mew), markerfacecolor (mfc), "+ "markerfacecoloralt (mfcalt), markersize (ms). For example: o2graph "+ "-create x 0 10 0.2 -function sin(x) y "+ "-plot x y lw=0,marker='+' -show"], ["double[]","plot1","Plot the array.","[kwargs]", "Plot the array. Some useful kwargs "+ "are color (c), dashes, linestyle (ls), linewidth (lw), marker, "+ "markeredgecolor (mec), markeredgewidth (mew), markerfacecolor (mfc), "+ "markerfacecoloralt (mfcalt), markersize (ms)."], ["int[]","plot1","Plot the array.","[kwargs]", "Plot the array. Some useful kwargs "+ "are color (c), dashes, linestyle (ls), linewidth (lw), marker, "+ "markeredgecolor (mec), markeredgewidth (mew), markerfacecolor (mfc), "+ "markerfacecoloralt (mfcalt), markersize (ms)."], ["size_t[]","plot1","Plot the array.","[kwargs]", "Plot the array. Some useful kwargs "+ "are color (c), dashes, linestyle (ls), linewidth (lw), marker, "+ "markeredgecolor (mec), markeredgewidth (mew), markerfacecolor (mfc), "+ "markerfacecoloralt (mfcalt), markersize (ms)."], ["vector<contour_line>","plot","Plot the contour lines.","[kwargs]", "Plot the set of contour lines. Some "+ "useful kwargs (which apply for all three "+ "object types) are color (c), dashes, linestyle (ls), linewidth (lw), "+ "marker, markeredgecolor (mec), markeredgewidth (mew), markerfacecolor "+ "(mfc), markerfacecoloralt (mfcalt), markersize (ms). 
For example: "+ "o2graph -create x 0 10 0.2 -function sin(x) y -plot x y "+ "lw=0,marker='+' -show"], ["hist_2d","den-plot","Create a density plot from a hist_2d object", "[kwargs]","Create a density plot from the current histogram (assuming "+ "equally-spaced bins). Logarithmic x- or y-axes are handled by taking "+ "the base 10 log of the x- or y-grids specified in the hist_2d object "+ "before plotting. A z-axis density legend is print on the RHS if colbar "+ "is set to 1 before plotting. If z-axis limits are specified, then "+ "values larger than the upper limit are set equal to the upper limit "+ "and values smaller than the lower limit are set equal to the lower "+ "limit before plotting."], ["tensor","den-plot","Create a density plot from a tensor object", "[index_1 index_2] [kwargs]", "Create a density plot from the current tensor. "+ "If the tensor has rank 2 and the indices are not specifed, then "+ "plot the first index along the x-axis and the second index along "+ "the y-axis. "+ "A z-axis density legend is print on the RHS if colbar "+ "is set to 1 before plotting. If z-axis limits are specified, then "+ "values larger than the upper limit are set equal to the upper limit "+ "and values smaller than the lower limit are set equal to the lower "+ "limit before plotting."], ["tensor<int>","den-plot","Create a density plot from a tensor object", "[index_1 index_2] [kwargs]", "Create a density plot from the current tensor. "+ "If the tensor has rank 2 and the indices are not specifed, then "+ "plot the first index along the x-axis and the second index along "+ "the y-axis. "+ "A z-axis density legend is print on the RHS if colbar "+ "is set to 1 before plotting. If z-axis limits are specified, then "+ "values larger than the upper limit are set equal to the upper limit "+ "and values smaller than the lower limit are set equal to the lower "+ "limit before plotting."], ["tensor<size_t>","den-plot","Create a density plot from a tensor object", "[index_1 index_2] [kwargs]", "Create a density plot from the current tensor. "+ "If the tensor has rank 2 and the indices are not specifed, then "+ "plot the first index along the x-axis and the second index along "+ "the y-axis. "+ "A z-axis density legend is print on the RHS if colbar "+ "is set to 1 before plotting. If z-axis limits are specified, then "+ "values larger than the upper limit are set equal to the upper limit "+ "and values smaller than the lower limit are set equal to the lower "+ "limit before plotting."], ["tensor_grid","den-plot","Create a density plot from a tensor object", "[index_1 index_2] [kwargs]", "Create a density plot from the current tensor. "+ "If the tensor has rank 2 and the indices are not specifed, then "+ "plot the first index along the x-axis and the second index along "+ "the y-axis. "+ "A z-axis density legend is print on the RHS if colbar "+ "is set to 1 before plotting. If z-axis limits are specified, then "+ "values larger than the upper limit are set equal to the upper limit "+ "and values smaller than the lower limit are set equal to the lower "+ "limit before plotting."], ["tensor_grid","yt-add-vol", "Add a tensor_grid object as a yt volume source", "[kwargs]","This adds the volumetric data specified in the "+ "tensor_grid object as a yt volume source. 
The transfer "+ "function previously specified by 'yt-tf' is used, or if "+ "unspecified, then yt's transfer_function_helper is used "+ "to create a 3 layer default transfer function."], ["tensor_grid","den-plot-anim", "Create an animated density plot from a tensor_grid object. ", "<x index> <y index> <z index [+'r']> <mp4 filename>", "(Requires ffmpeg.)"], ] """ This is a list of 5-element entries: 1: object type 2: command name 3: short description 4: argument list 5: full help text """ param_list=[ ["colbar","If true, den-plot adds a color legend (default False)."], ["editor","If true, open the plot editor."], ["fig_dict",("Dictionary for figure properties. The default value is "+ "blank and implies ('fig_size_x=6.0, fig_size_y=6.0, "+ "ticks_in=False, "+ "rt_ticks=False, left_margin=0.14, right_margin=0.04, "+ "bottom_margin=0.12, top_margin=0.04, fontsize=16') ."+ "The x and y sizes of the figure object are in "+ "fig_size_x and fig_size_y. The value ticks_in refers "+ "to whether or not the ticks are inside or outside the "+ "plot. The value of rt_ticks refers to whether or not "+ "tick marks are plotted on the right and top sides of "+ "the plot. If the font size is unspecified, then "+ "the 'font' setting is used. "+ "The font size parameter is multiplied by 0.8 "+ "and then used for the axis labels. Note that this "+ "value must be set before the plotting canvas is"+ "created (which is done by 'subplots' or automatically "+ "when the first object is added to the plot) in order "+ "to have any effect.")], ["font","Font scaling for text objects (default 16)."], ["logx","If true, use a logarithmic x-axis (default False)."], ["logy","If true, use a logarithmic y-axis (default False)."], ["logz","If true, use a logarithmic z-axis (default False)."], ["usetex","If true, use LaTeX for text rendering (default True)."], ["verbose","Verbosity parameter (default 1)."], ["xhi","Upper limit for x-axis (function if starts with '(')."], ["xlo","Lower limit for x-axis (function if starts with '(')."], ["xset","If true, x-axis limits have been set (default False)."], ["yhi","Upper limit for y-axis (function if starts with '(')."], ["ylo","Lower limit for y-axis (function if starts with '(')."], ["yset","If true, y-axis limits have been set (default False)."], ["zlo","Lower limit for z-axis (function if starts with '(')."], ["zhi","Upper limit for z-axis (function if starts with '(')."], ["zset","If true, z-axis limits have been set (default False)."] ] """ List of o2sclpy parameters A list of 2-element entries, name and description """ yt_param_list=[ ["yt_filter","Filter for yt-generated images (default '')"], ["yt_focus","The camera focus (default is the center of the volume)."], ["yt_position","The camera position "+ "(default is '[1.5,0.6,0.7] internal')."], ["yt_north","The camera north vector (default [0.0,0.0,1.0])."], ["yt_width","The camera width relative to the domain volume< "+ "(default [1.5,1.5,1.5])."], ["yt_resolution","The rendering resolution (default (512,512))."], ["yt_sigma_clip","Sigma clipping parameter (default 4.0)."] ] """ List of yt parameters for o2sclpy A list of 2-element entries, name and description """ acol_help_topics=["functions","mult-vector-spec","strings-spec", "types","value-spec","vector-spec"] o2graph_help_topics=["cmaps","cmaps-plot","colors","colors-plot", "colors-near","markers","markers-plot", "xkcd-colors"]
gpl-3.0
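The docstrings above describe base_list and extra_list as flat lists of fixed-width entries (command name, short description, argument list, full help text). The sketch below only illustrates how such a structure can be searched; the real o2graph help machinery lives elsewhere in o2sclpy, and the demo entry is a trimmed copy of the 'line' record above.

# Illustration of looking up a command in a 4-element help list like base_list.
def find_help(entries, command):
    for name, short, args, long_help in entries:
        if name == command:
            return '%s: %s\n  usage: %s\n\n%s' % (name, short, args, long_help)
    return 'no help found for %r' % command

demo_entries = [
    ['line', 'Plot a line.', '<x1> <y1> <x2> <y2> [kwargs]',
     'Plot a line from (x1,y1) to (x2,y2).'],
]
print(find_help(demo_entries, 'line'))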
rlugojr/incubator-zeppelin
python/src/main/resources/bootstrap.py
2
3876
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # PYTHON 2 / 3 comptability : # bootstrap.py must be runnable with Python 2 and 3 # Remove interactive mode displayhook import sys import signal try: import StringIO as io except ImportError: import io as io sys.displayhook = lambda x: None def intHandler(signum, frame): # Set the signal handler print ("Paragraph interrupted") raise KeyboardInterrupt() signal.signal(signal.SIGINT, intHandler) def help(): print ('%html') print ('<h2>Python Interpreter help</h2>') print ('<h3>Python 2 & 3 comptability</h3>') print ('<p>The interpreter is compatible with Python 2 & 3.<br/>') print ('To change Python version, ') print ('change in the interpreter configuration the python to the ') print ('desired version (example : python=/usr/bin/python3)</p>') print ('<h3>Python modules</h3>') print ('<p>The interpreter can use all modules already installed ') print ('(with pip, easy_install, etc)</p>') print ('<h3>Forms</h3>') print ('You must install py4j in order to use ' 'the form feature (pip install py4j)') print ('<h4>Input form</h4>') print ('<pre>print (z.input("f1","defaultValue"))</pre>') print ('<h4>Selection form</h4>') print ('<pre>print(z.select("f2", [("o1","1"), ("o2","2")],2))</pre>') print ('<h4>Checkbox form</h4>') print ('<pre> print("".join(z.checkbox("f3", [("o1","1"), ' '("o2","2")],["1"])))</pre>') print ('<h3>Matplotlib graph</h3>') print ('<div>The interpreter can display matplotlib graph with ') print ('the function zeppelin_show()</div>') print ('<div> You need to already have matplotlib module installed ') print ('to use this functionality !</div><br/>') print ('''<pre>import matplotlib.pyplot as plt plt.figure() (.. ..) 
zeppelin_show(plt) plt.close() </pre>''') print ('<div><br/> zeppelin_show function can take optional parameters ') print ('to adapt graph width and height</div>') print ("<div><b>example </b>:") print('''<pre>zeppelin_show(plt,width='50px') zeppelin_show(plt,height='150px') </pre></div>''') # Matplotlib show function def zeppelin_show(p, width="0", height="0"): img = io.StringIO() p.savefig(img, format='svg') img.seek(0) style = "" if(width != "0"): style += 'width:'+width if(height != "0"): if(len(style) != 0): style += "," style += 'height:'+height print("%html <div style='" + style + "'>" + img.read() + "<div>") # If py4j is detected, these class will be override # with the implementation in bootstrap_input.py class PyZeppelinContext(): errorMsg = "You must install py4j Python module " \ "(pip install py4j) to use Zeppelin dynamic forms features" def __init__(self, zc): self.z = zc def input(self, name, defaultValue=""): print (self.errorMsg) def select(self, name, options, defaultValue=""): print (self.errorMsg) def checkbox(self, name, options, defaultChecked=[]): print (self.errorMsg) z = PyZeppelinContext("")
apache-2.0
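zeppelin_show() above renders a figure to an in-memory buffer as SVG and wraps the markup in a %html block. The standalone sketch below shows the same savefig-to-buffer step on its own (Python 3, Agg backend); how the resulting string is displayed is left to the caller.

# Standalone sketch of the savefig-to-buffer trick behind zeppelin_show():
# render a figure to an in-memory SVG string instead of a file.
import io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])

buf = io.StringIO()
plt.savefig(buf, format='svg')
buf.seek(0)
svg = buf.read()
print(svg[:60])   # starts with '<?xml ...' -- ready to embed in HTML
plt.close()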
HazyResearch/metal
metal/analysis.py
1
11061
from collections import Counter, defaultdict import numpy as np import scipy.sparse as sparse from pandas import DataFrame, Series from metal.utils import arraylike_to_numpy ############################################################ # Label Matrix Diagnostics ############################################################ def _covered_data_points(L): """Returns an indicator vector where ith element = 1 if x_i is labeled by at least one LF.""" return np.ravel(np.where(L.sum(axis=1) != 0, 1, 0)) def _overlapped_data_points(L): """Returns an indicator vector where ith element = 1 if x_i is labeled by more than one LF.""" return np.where(np.ravel((L != 0).sum(axis=1)) > 1, 1, 0) def _conflicted_data_points(L): """Returns an indicator vector where ith element = 1 if x_i is labeled by at least two LFs that give it disagreeing labels.""" m = sparse.diags(np.ravel(L.max(axis=1).todense())) return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense()) def label_coverage(L): """Returns the **fraction of data points with > 0 (non-zero) labels** Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith item """ return _covered_data_points(L).sum() / L.shape[0] def label_overlap(L): """Returns the **fraction of data points with > 1 (non-zero) labels** Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith item """ return _overlapped_data_points(L).sum() / L.shape[0] def label_conflict(L): """Returns the **fraction of data points with conflicting (disagreeing) lablels.** Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith item """ return _conflicted_data_points(L).sum() / L.shape[0] def lf_polarities(L): """Return the polarities of each LF based on evidence in a label matrix. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate """ polarities = [sorted(list(set(L[:, i].data))) for i in range(L.shape[1])] return [p[0] if len(p) == 1 else p for p in polarities] def lf_coverages(L): """Return the **fraction of data points that each LF labels.** Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate """ return np.ravel((L != 0).sum(axis=0)) / L.shape[0] def lf_overlaps(L, normalize_by_coverage=False): """Return the **fraction of items each LF labels that are also labeled by at least one other LF.** Note that the maximum possible overlap fraction for an LF is the LF's coverage, unless `normalize_by_coverage=True`, in which case it is 1. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate normalize_by_coverage: Normalize by coverage of the LF, so that it returns the percent of LF labels that have overlaps. """ overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0] if normalize_by_coverage: overlaps /= lf_coverages(L) return np.nan_to_num(overlaps) def lf_conflicts(L, normalize_by_overlaps=False): """Return the **fraction of items each LF labels that are also given a different (non-abstain) label by at least one other LF.** Note that the maximum possible conflict fraction for an LF is the LF's overlaps fraction, unless `normalize_by_overlaps=True`, in which case it is 1. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate normalize_by_overlaps: Normalize by overlaps of the LF, so that it returns the percent of LF overlaps that have conflicts. 
""" conflicts = (L != 0).T @ _conflicted_data_points(L) / L.shape[0] if normalize_by_overlaps: conflicts /= lf_overlaps(L) return np.nan_to_num(conflicts) def lf_empirical_accuracies(L, Y): """Return the **empirical accuracy** against a set of labels Y (e.g. dev set) for each LF. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels """ # Assume labeled set is small, work with dense matrices Y = arraylike_to_numpy(Y) L = L.toarray() X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1)) return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1) def lf_summary(L, Y=None, lf_names=None, est_accs=None): """Returns a pandas DataFrame with the various per-LF statistics. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels. If provided, the empirical accuracy for each LF will be calculated """ n, m = L.shape if lf_names is not None: col_names = ["j"] d = {"j": list(range(m))} else: lf_names = list(range(m)) col_names = [] d = {} # Default LF stats col_names.extend(["Polarity", "Coverage", "Overlaps", "Conflicts"]) d["Polarity"] = Series(data=lf_polarities(L), index=lf_names) d["Coverage"] = Series(data=lf_coverages(L), index=lf_names) d["Overlaps"] = Series(data=lf_overlaps(L), index=lf_names) d["Conflicts"] = Series(data=lf_conflicts(L), index=lf_names) if Y is not None: col_names.extend(["Correct", "Incorrect", "Emp. Acc."]) confusions = [ confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m) ] corrects = [np.diagonal(conf).sum() for conf in confusions] incorrects = [ conf.sum() - correct for conf, correct in zip(confusions, corrects) ] accs = lf_empirical_accuracies(L, Y) d["Correct"] = Series(data=corrects, index=lf_names) d["Incorrect"] = Series(data=incorrects, index=lf_names) d["Emp. Acc."] = Series(data=accs, index=lf_names) if est_accs is not None: col_names.append("Learned Acc.") d["Learned Acc."] = Series(est_accs, index=lf_names) return DataFrame(data=d, index=lf_names)[col_names] def single_lf_summary(Y_p, Y=None): """Calculates coverage, overlap, conflicts, and accuracy for a single LF Args: Y_p: a np.array or torch.Tensor of predicted labels Y: a np.array or torch.Tensor of true labels (if known) """ L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1)) return lf_summary(L, Y) def error_buckets(gold, pred, X=None): """Group items by error buckets Args: gold: an array-like of gold labels (ints) pred: an array-like of predictions (ints) X: an iterable of items Returns: buckets: A dict of items where buckets[i,j] is a list of items with predicted label i and true label j. If X is None, return indices instead. For a binary problem with (1=positive, 2=negative): buckets[1,1] = true positives buckets[1,2] = false positives buckets[2,1] = false negatives buckets[2,2] = true negatives """ buckets = defaultdict(list) gold = arraylike_to_numpy(gold) pred = arraylike_to_numpy(pred) for i, (y, l) in enumerate(zip(pred, gold)): buckets[y, l].append(X[i] if X is not None else i) return buckets def confusion_matrix( gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True ): """A shortcut method for building a confusion matrix all at once. 
Args: gold: an array-like of gold labels (ints) pred: an array-like of predictions (ints) null_pred: If True, include the row corresponding to null predictions null_gold: If True, include the col corresponding to null gold labels normalize: if True, divide counts by the total number of items pretty_print: if True, pretty-print the matrix before returning """ conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold) gold = arraylike_to_numpy(gold) pred = arraylike_to_numpy(pred) conf.add(gold, pred) mat = conf.compile() if normalize: mat = mat / len(gold) if pretty_print: conf.display(normalize=normalize) return mat class ConfusionMatrix(object): """ An iteratively built abstention-aware confusion matrix with pretty printing Assumed axes are true label on top, predictions on the side. """ def __init__(self, null_pred=False, null_gold=False): """ Args: null_pred: If True, include the row corresponding to null predictions null_gold: If True, include the col corresponding to null gold labels """ self.counter = Counter() self.mat = None self.null_pred = null_pred self.null_gold = null_gold def __repr__(self): if self.mat is None: self.compile() return str(self.mat) def add(self, gold, pred): """ Args: gold: a np.ndarray of gold labels (ints) pred: a np.ndarray of predictions (ints) """ self.counter.update(zip(gold, pred)) def compile(self, trim=True): k = max([max(tup) for tup in self.counter.keys()]) + 1 # include 0 mat = np.zeros((k, k), dtype=int) for (y, l), v in self.counter.items(): mat[l, y] = v if trim and not self.null_pred: mat = mat[1:, :] if trim and not self.null_gold: mat = mat[:, 1:] self.mat = mat return mat def display(self, normalize=False, indent=0, spacing=2, decimals=3, mark_diag=True): mat = self.compile(trim=False) m, n = mat.shape tab = " " * spacing margin = " " * indent # Print headers s = margin + " " * (5 + spacing) for j in range(n): if j == 0 and not self.null_gold: continue s += f" y={j} " + tab print(s) # Print data for i in range(m): # Skip null predictions row if necessary if i == 0 and not self.null_pred: continue s = margin + f" l={i} " + tab for j in range(n): # Skip null gold if necessary if j == 0 and not self.null_gold: continue else: if i == j and mark_diag and normalize: s = s[:-1] + "*" if normalize: s += f"{mat[i,j]/sum(mat[i,1:]):>5.3f}" + tab else: s += f"{mat[i,j]:^5d}" + tab print(s)
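A quick, self-contained sanity check of the diagnostics above on a toy label matrix (assuming the functions defined in this module are in scope, e.g. imported from it). The expected values in the comments follow directly from the 4 x 3 matrix used, where 0 means the labeling function abstained.

import numpy as np
import scipy.sparse as sparse

# 4 data points x 3 labeling functions; labels in {1, 2}, 0 = abstain
L_toy = sparse.csr_matrix(np.array([
    [1, 0, 1],   # two LFs agree
    [1, 2, 0],   # two LFs disagree
    [0, 0, 2],   # labeled by a single LF
    [0, 0, 0],   # no LF labels this point
]))

print(label_coverage(L_toy))   # 0.75 -> 3 of 4 points get at least one label
print(label_overlap(L_toy))    # 0.50 -> 2 of 4 points get more than one label
print(label_conflict(L_toy))   # 0.25 -> 1 of 4 points gets disagreeing labels
print(lf_summary(L_toy))       # per-LF polarity / coverage / overlaps / conflicts table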
apache-2.0
nelango/ViralityAnalysis
model/lib/sklearn/__init__.py
4
3034
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.17' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', 'discriminant_analysis', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
mit
joshbohde/scikit-learn
sklearn/linear_model/sparse/stochastic_gradient.py
2
15801
# Author: Peter Prettenhofer <[email protected]> # # License: BSD Style. """Implementation of Stochastic Gradient Descent (SGD) with sparse data.""" import numpy as np import scipy.sparse as sp from ...externals.joblib import Parallel, delayed from ..base import BaseSGDClassifier, BaseSGDRegressor from ..sgd_fast_sparse import plain_sgd ## TODO add flag for intercept learning rate heuristic ## class SGDClassifier(BaseSGDClassifier): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works on scipy.sparse X and dense coef_. Parameters ---------- loss : str, 'hinge' or 'log' or 'modified_huber' The loss function to be used. Defaults to 'hinge'. The hinge loss is a margin loss used by standard linear SVM models. The 'log' loss is the loss of logistic regression models and can be used for probability estimation in binary classifiers. 'modified_huber' is another smooth loss that brings tolerance to outliers. penalty : str, 'l2' or 'l1' or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' migh bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 rho : float The Elastic Net mixing parameter, with 0 < rho <= 1. Defaults to 0.85. fit_intercept: bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter: int The number of passes over the training data (aka epochs). Defaults to 5. shuffle: bool Whether or not the training data should be shuffled after each epoch. Defaults to False. seed: int, optional The seed of the pseudo random number generator to use when shuffling the data. verbose: integer, optional The verbosity level n_jobs: integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. learning_rate : string, optional The learning rate: constant: eta = eta0 optimal: eta = 1.0/(t+t0) [default] invscaling: eta = eta0 / pow(t, power_t) eta0 : double, optional The initial learning rate [default 0.01]. power_t : double, optional The exponent for inverse scaling learning rate [default 0.25]. Attributes ---------- `coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features. `intercept_` : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. `sparse_coef_` : sparse.csr_matrix, , shape = [1, n_features] if n_classes == 2 else [n_classes, n_features] Weights represented as Row Compressed Matrix. 
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> clf = linear_model.sparse.SGDClassifier() >>> clf.fit(X, y) SGDClassifier(alpha=0.0001, eta0=0.0, fit_intercept=True, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, rho=1.0, seed=0, shuffle=False, verbose=0) >>> print clf.predict([[-0.8, -1]]) [ 1.] See also -------- LinearSVC, LogisticRegression """ def _set_coef(self, coef_): self.coef_ = coef_ if coef_ is None: self.sparse_coef_ = None else: # sparse representation of the fitted coef for the predict method self.sparse_coef_ = sp.csr_matrix(coef_) def _fit_binary(self, X, y): """Fit a binary classifier. """ # interprete X as CSR matrix X = sp.csr_matrix(X) # encode original class labels as 1 (classes[1]) or -1 (classes[0]). y_new = np.ones(y.shape, dtype=np.float64, order="C") * -1.0 y_new[y == self.classes[1]] = 1.0 y = y_new # get sparse matrix datastructures X_data = np.array(X.data, dtype=np.float64, order="C") X_indices = np.array(X.indices, dtype=np.int32, order="C") X_indptr = np.array(X.indptr, dtype=np.int32, order="C") coef_, intercept_ = plain_sgd(self.coef_, self.intercept_, self.loss_function, self.penalty_type, self.alpha, self.rho, X_data, X_indices, X_indptr, y, self.n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), int(self.seed), self.class_weight[1], self.class_weight[0], self.sample_weight, self.learning_rate_code, self.eta0, self.power_t) # update self.coef_ and self.sparse_coef_ consistently self._set_coef(np.atleast_2d(self.coef_)) self.intercept_ = np.asarray(intercept_) def _fit_multiclass(self, X, y): """Fit a multi-class classifier with a combination of binary classifiers, each predicts one class versus all others (OVA: One Versus All). """ # interprete X as CSR matrix X = sp.csr_matrix(X) # get sparse matrix datastructures X_data = np.array(X.data, dtype=np.float64, order="C") X_indices = np.array(X.indices, dtype=np.int32, order="C") X_indptr = np.array(X.indptr, dtype=np.int32, order="C") res = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_train_ova_classifier)(i, c, X_data, X_indices, X_indptr, y, self.coef_[i], self.intercept_[i], self.loss_function, self.penalty_type, self.alpha, self.rho, self.n_iter, self.fit_intercept, self.verbose, self.shuffle, self.seed, self.class_weight[i], self.sample_weight, self.learning_rate_code, self.eta0, self.power_t) for i, c in enumerate(self.classes)) for i, coef, intercept in res: self.coef_[i] = coef self.intercept_[i] = intercept self._set_coef(self.coef_) self.intercept_ = self.intercept_ def decision_function(self, X): """Predict signed 'distance' to the hyperplane (aka confidence score). Parameters ---------- X : scipy.sparse matrix of shape [n_samples, n_features] Returns ------- array, shape = [n_samples] if n_classes == 2 else [n_samples,n_classes] The signed 'distances' to the hyperplane(s). 
""" # np.dot only works correctly if both arguments are sparse matrices if not sp.issparse(X): X = sp.csr_matrix(X) scores = np.asarray(np.dot(X, self.sparse_coef_.T).todense() + self.intercept_) if self.classes.shape[0] == 2: return np.ravel(scores) else: return scores def _train_ova_classifier(i, c, X_data, X_indices, X_indptr, y, coef_, intercept_, loss_function, penalty_type, alpha, rho, n_iter, fit_intercept, verbose, shuffle, seed, class_weight_pos, sample_weight, learning_rate, eta0, power_t): """Inner loop for One-vs.-All scheme""" y_i = np.ones(y.shape, dtype=np.float64, order='C') * -1.0 y_i[y == c] = 1.0 coef, intercept = plain_sgd(coef_, intercept_, loss_function, penalty_type, alpha, rho, X_data, X_indices, X_indptr, y_i, n_iter, int(fit_intercept), int(verbose), int(shuffle), int(seed), class_weight_pos, 1.0, sample_weight, learning_rate, eta0, power_t) return (i, coef, intercept) class SGDRegressor(BaseSGDRegressor): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Parameters ---------- loss : str, 'squared_loss' or 'huber' The loss function to be used. Defaults to 'squared_loss' which refers to the ordinary least squares fit. 'huber' is an epsilon insensitive loss function for robust regression. penalty : str, 'l2' or 'l1' or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' migh bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 rho : float The Elastic Net mixing parameter, with 0 < rho <= 1. Defaults to 0.85. fit_intercept: bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter: int The number of passes over the training data (aka epochs). Defaults to 5. shuffle: bool Whether or not the training data should be shuffled after each epoch. Defaults to False. seed: int, optional The seed of the pseudo random number generator to use when shuffling the data. verbose: integer, optional The verbosity level p : float Epsilon in the epsilon insensitive huber loss function; only if `loss=='huber'`. learning_rate : string, optional The learning rate: constant: eta = eta0 optimal: eta = 1.0/(t+t0) invscaling: eta = eta0 / pow(t, power_t) [default] eta0 : double, optional The initial learning rate [default 0.01]. power_t : double, optional The exponent for inverse scaling learning rate [default 0.25]. Attributes ---------- `coef_` : array, shape = [n_features] Weights asigned to the features. `intercept_` : array, shape = [1] The intercept term. 
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.sparse.SGDRegressor() >>> clf.fit(X, y) SGDRegressor(alpha=0.0001, eta0=0.01, fit_intercept=True, learning_rate='invscaling', loss='squared_loss', n_iter=5, p=0.1, penalty='l2', power_t=0.25, rho=1.0, seed=0, shuffle=False, verbose=0) See also -------- RidgeRegression, ElasticNet, Lasso, SVR """ def _set_coef(self, coef_): self.coef_ = coef_ if coef_ is None: self.sparse_coef_ = None else: # sparse representation of the fitted coef for the predict method self.sparse_coef_ = sp.csr_matrix(coef_) def _fit_regressor(self, X, y): # interprete X as CSR matrix X = sp.csr_matrix(X) # get sparse matrix datastructures X_data = np.array(X.data, dtype=np.float64, order="C") X_indices = np.array(X.indices, dtype=np.int32, order="C") X_indptr = np.array(X.indptr, dtype=np.int32, order="C") coef_, intercept_ = plain_sgd(self.coef_, self.intercept_, self.loss_function, self.penalty_type, self.alpha, self.rho, X_data, X_indices, X_indptr, y, self.n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), int(self.seed), 1.0, 1.0, self.sample_weight, self.learning_rate_code, self.eta0, self.power_t) # update self.coef_ and self.sparse_coef_ consistently self._set_coef(self.coef_) self.intercept_ = np.asarray(intercept_) def predict(self, X): """Predict using the linear model Parameters ---------- X : array or scipy.sparse matrix of shape [n_samples, n_features] Whether the numpy.array or scipy.sparse matrix is accepted dependes on the actual implementation Returns ------- array, shape = [n_samples] Array containing the predicted class labels. """ # np.dot only works correctly if both arguments are sparse matrices if not sp.issparse(X): X = sp.csr_matrix(X) scores = np.asarray(np.dot(X, self.sparse_coef_.T).todense() + self.intercept_).ravel() return scores
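The compiled plain_sgd routine whose signature appears above is not shown here. Purely as an illustration of the behaviour the docstrings describe (hinge-loss updates with a decreasing learning rate, and an L1 shrinkage step truncated at zero so weights can become exactly sparse), a dense NumPy sketch might look like the following; this is toy code, not the library's implementation.

import numpy as np

def toy_sgd_hinge_l1(X, y, alpha=1e-4, eta0=0.01, n_iter=5, rng=None):
    """Illustrative dense SGD with hinge loss and a crude L1 truncation step."""
    rng = np.random.RandomState(0) if rng is None else rng
    n_samples, n_features = X.shape
    w = np.zeros(n_features)
    b = 0.0
    t = 1
    for _ in range(n_iter):
        for i in rng.permutation(n_samples):
            eta = eta0 / np.sqrt(t)           # decreasing learning rate schedule
            margin = y[i] * (X[i] @ w + b)
            if margin < 1:                    # hinge loss is active -> take a gradient step
                w += eta * y[i] * X[i]
                b += eta * y[i]
            # L1 penalty: shrink towards zero and truncate sign crossings to exactly 0.0
            shrink = eta * alpha
            w = np.sign(w) * np.maximum(np.abs(w) - shrink, 0.0)
            t += 1
    return w, b

X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
y = np.array([-1., -1., 1., 1.])
w, b = toy_sgd_hinge_l1(X, y)
print(np.sign(X @ w + b))   # expected: [-1. -1.  1.  1.]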
bsd-3-clause
snap-stanford/ogb
examples/graphproppred/code2/main_pyg.py
1
11272
import torch from torch_geometric.data import DataLoader import torch.optim as optim import torch.nn.functional as F from torchvision import transforms from gnn import GNN from tqdm import tqdm import argparse import time import numpy as np import pandas as pd import os ### importing OGB from ogb.graphproppred import PygGraphPropPredDataset, Evaluator ### importing utils from utils import ASTNodeEncoder, get_vocab_mapping ### for data transform from utils import augment_edge, encode_y_to_arr, decode_arr_to_seq multicls_criterion = torch.nn.CrossEntropyLoss() def train(model, device, loader, optimizer): model.train() loss_accum = 0 for step, batch in enumerate(tqdm(loader, desc="Iteration")): batch = batch.to(device) if batch.x.shape[0] == 1 or batch.batch[-1] == 0: pass else: pred_list = model(batch) optimizer.zero_grad() loss = 0 for i in range(len(pred_list)): loss += multicls_criterion(pred_list[i].to(torch.float32), batch.y_arr[:,i]) loss = loss / len(pred_list) loss.backward() optimizer.step() loss_accum += loss.item() print('Average training loss: {}'.format(loss_accum / (step + 1))) def eval(model, device, loader, evaluator, arr_to_seq): model.eval() seq_ref_list = [] seq_pred_list = [] for step, batch in enumerate(tqdm(loader, desc="Iteration")): batch = batch.to(device) if batch.x.shape[0] == 1: pass else: with torch.no_grad(): pred_list = model(batch) mat = [] for i in range(len(pred_list)): mat.append(torch.argmax(pred_list[i], dim = 1).view(-1,1)) mat = torch.cat(mat, dim = 1) seq_pred = [arr_to_seq(arr) for arr in mat] # PyG = 1.4.3 # seq_ref = [batch.y[i][0] for i in range(len(batch.y))] # PyG >= 1.5.0 seq_ref = [batch.y[i] for i in range(len(batch.y))] seq_ref_list.extend(seq_ref) seq_pred_list.extend(seq_pred) input_dict = {"seq_ref": seq_ref_list, "seq_pred": seq_pred_list} return evaluator.eval(input_dict) def main(): # Training settings parser = argparse.ArgumentParser(description='GNN baselines on ogbg-code2 data with Pytorch Geometrics') parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)') parser.add_argument('--gnn', type=str, default='gcn-virtual', help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gcn-virtual)') parser.add_argument('--drop_ratio', type=float, default=0, help='dropout ratio (default: 0)') parser.add_argument('--max_seq_len', type=int, default=5, help='maximum sequence length to predict (default: 5)') parser.add_argument('--num_vocab', type=int, default=5000, help='the number of vocabulary used for sequence prediction (default: 5000)') parser.add_argument('--num_layer', type=int, default=5, help='number of GNN message passing layers (default: 5)') parser.add_argument('--emb_dim', type=int, default=300, help='dimensionality of hidden units in GNNs (default: 300)') parser.add_argument('--batch_size', type=int, default=128, help='input batch size for training (default: 128)') parser.add_argument('--epochs', type=int, default=25, help='number of epochs to train (default: 25)') parser.add_argument('--random_split', action='store_true') parser.add_argument('--num_workers', type=int, default=0, help='number of workers (default: 0)') parser.add_argument('--dataset', type=str, default="ogbg-code2", help='dataset name (default: ogbg-code2)') parser.add_argument('--filename', type=str, default="", help='filename to output result (default: )') args = parser.parse_args() print(args) device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu") ### automatic dataloading 
and splitting dataset = PygGraphPropPredDataset(name = args.dataset) seq_len_list = np.array([len(seq) for seq in dataset.data.y]) print('Target seqence less or equal to {} is {}%.'.format(args.max_seq_len, np.sum(seq_len_list <= args.max_seq_len) / len(seq_len_list))) split_idx = dataset.get_idx_split() if args.random_split: print('Using random split') perm = torch.randperm(len(dataset)) num_train, num_valid, num_test = len(split_idx['train']), len(split_idx['valid']), len(split_idx['test']) split_idx['train'] = perm[:num_train] split_idx['valid'] = perm[num_train:num_train+num_valid] split_idx['test'] = perm[num_train+num_valid:] assert(len(split_idx['train']) == num_train) assert(len(split_idx['valid']) == num_valid) assert(len(split_idx['test']) == num_test) # print(split_idx['train']) # print(split_idx['valid']) # print(split_idx['test']) # train_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['train']] # valid_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['valid']] # test_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['test']] # print('#train') # print(len(train_method_name)) # print('#valid') # print(len(valid_method_name)) # print('#test') # print(len(test_method_name)) # train_method_name_set = set(train_method_name) # valid_method_name_set = set(valid_method_name) # test_method_name_set = set(test_method_name) # # unique method name # print('#unique train') # print(len(train_method_name_set)) # print('#unique valid') # print(len(valid_method_name_set)) # print('#unique test') # print(len(test_method_name_set)) # # unique valid/test method name # print('#valid unseen during training') # print(len(valid_method_name_set - train_method_name_set)) # print('#test unseen during training') # print(len(test_method_name_set - train_method_name_set)) ### building vocabulary for sequence predition. Only use training data. vocab2idx, idx2vocab = get_vocab_mapping([dataset.data.y[i] for i in split_idx['train']], args.num_vocab) # test encoder and decoder # for data in dataset: # # PyG >= 1.5.0 # print(data.y) # # # PyG 1.4.3 # # print(data.y[0]) # data = encode_y_to_arr(data, vocab2idx, args.max_seq_len) # print(data.y_arr[0]) # decoded_seq = decode_arr_to_seq(data.y_arr[0], idx2vocab) # print(decoded_seq) # print('') ## test augment_edge # data = dataset[2] # print(data) # data_augmented = augment_edge(data) # print(data_augmented) ### set the transform function # augment_edge: add next-token edge as well as inverse edges. add edge attributes. # encode_y_to_arr: add y_arr to PyG data object, indicating the array representation of a sequence. dataset.transform = transforms.Compose([augment_edge, lambda data: encode_y_to_arr(data, vocab2idx, args.max_seq_len)]) ### automatic evaluator. takes dataset name as input evaluator = Evaluator(args.dataset) train_loader = DataLoader(dataset[split_idx["train"]], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers) valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers) test_loader = DataLoader(dataset[split_idx["test"]], batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers) nodetypes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'typeidx2type.csv.gz')) nodeattributes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'attridx2attr.csv.gz')) print(nodeattributes_mapping) ### Encoding node features into emb_dim vectors. ### The following three node features are used. # 1. 
node type # 2. node attribute # 3. node depth node_encoder = ASTNodeEncoder(args.emb_dim, num_nodetypes = len(nodetypes_mapping['type']), num_nodeattributes = len(nodeattributes_mapping['attr']), max_depth = 20) if args.gnn == 'gin': model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gin', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device) elif args.gnn == 'gin-virtual': model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gin', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device) elif args.gnn == 'gcn': model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gcn', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device) elif args.gnn == 'gcn-virtual': model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gcn', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device) else: raise ValueError('Invalid GNN type') optimizer = optim.Adam(model.parameters(), lr=0.001) print(f'#Params: {sum(p.numel() for p in model.parameters())}') valid_curve = [] test_curve = [] train_curve = [] for epoch in range(1, args.epochs + 1): print("=====Epoch {}".format(epoch)) print('Training...') train(model, device, train_loader, optimizer) print('Evaluating...') train_perf = eval(model, device, train_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab)) valid_perf = eval(model, device, valid_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab)) test_perf = eval(model, device, test_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab)) print({'Train': train_perf, 'Validation': valid_perf, 'Test': test_perf}) train_curve.append(train_perf[dataset.eval_metric]) valid_curve.append(valid_perf[dataset.eval_metric]) test_curve.append(test_perf[dataset.eval_metric]) print('F1') best_val_epoch = np.argmax(np.array(valid_curve)) best_train = max(train_curve) print('Finished training!') print('Best validation score: {}'.format(valid_curve[best_val_epoch])) print('Test score: {}'.format(test_curve[best_val_epoch])) if not args.filename == '': result_dict = {'Val': valid_curve[best_val_epoch], 'Test': test_curve[best_val_epoch], 'Train': train_curve[best_val_epoch], 'BestTrain': best_train} torch.save(result_dict, args.filename) if __name__ == "__main__": main()
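The vocabulary helpers imported from utils (get_vocab_mapping, decode_arr_to_seq) are not shown in this file. As an illustration of the idea only, not the OGB reference implementation, a frequency-ranked vocabulary with reserved unknown/end-of-sequence ids could be sketched like this (token names and special-token conventions here are assumptions):

from collections import Counter

def toy_vocab_mapping(seq_list, num_vocab):
    # Keep the num_vocab most frequent tokens; reserve the last two ids for UNK / EOS.
    counts = Counter(tok for seq in seq_list for tok in seq)
    vocab = [tok for tok, _ in counts.most_common(num_vocab)]
    vocab2idx = {tok: i for i, tok in enumerate(vocab)}
    vocab2idx['__UNK__'] = len(vocab)
    vocab2idx['__EOS__'] = len(vocab) + 1
    idx2vocab = {i: tok for tok, i in vocab2idx.items()}
    return vocab2idx, idx2vocab

def toy_decode(arr, idx2vocab):
    eos = len(idx2vocab) - 1
    out = []
    for i in arr:
        if i == eos:
            break
        out.append(idx2vocab[int(i)])
    return out

vocab2idx, idx2vocab = toy_vocab_mapping([['get', 'name'], ['set', 'name']], num_vocab=10)
print(toy_decode([0, 1, vocab2idx['__EOS__']], idx2vocab))   # ['name', 'get']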
mit
brguez/TEIBA
src/python/retrotranspositionRate_tumorTypes.py
1
5005
#!/usr/bin/env python #coding: utf-8 def header(string): """ Display header """ timeInfo = time.strftime("%Y-%m-%d %H:%M") print '\n', timeInfo, "****", string, "****" def info(string): """ Display basic information """ timeInfo = time.strftime("%Y-%m-%d %H:%M") print timeInfo, string ####### CLASSES ####### class cohort(): """ ..................... Methods: - """ def __init__(self): """ """ self.VCFdict = {} def read_VCFs(self, inputPath): """ """ inputFile = open(inputPath, 'r') info("Read input VCFs ") # Per iteration, read a VCF, generate a VCF object and add it to the cohort for line in inputFile: line = line.rstrip('\n') line = line.split("\t") donorId = line[0] projectCode = line[1].split("-")[0] VCFfile = line[2] #print "tiooo: ", donorId, projectCode, VCFfile # Create VCF object VCFObj = formats.VCF() info("Reading " + VCFfile + "...") # Input VCF available if os.path.isfile(VCFfile): # Read VCF and add information to VCF object VCFObj.read_VCF(VCFfile) # Initialize the donor list for a given project if needed if projectCode not in self.VCFdict: self.VCFdict[projectCode] = [] # Add donor VCF to cohort self.VCFdict[projectCode].append(VCFObj) else: print "[ERROR] Input file does not exist" #### MAIN #### ## Import modules ## import argparse import sys import os.path import formats import time from operator import itemgetter, attrgetter, methodcaller import pandas as pd import numpy as np from matplotlib import pyplot as plt import matplotlib.patches as mpatches ## Get user's input ## parser = argparse.ArgumentParser(description= """""") parser.add_argument('inputPath', help='Tabular text file containing one row per donor with the following consecutive fields: projectCode donorId vcf_path') parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.') args = parser.parse_args() inputPath = args.inputPath outDir = args.outDir scriptName = os.path.basename(sys.argv[0]) ## Display configuration to standard output ## print print "***** ", scriptName, " configuration *****" print "inputPath: ", inputPath print "outDir: ", outDir print print "***** Executing ", scriptName, ".... *****" print ## Start ## ### 1. Initialize cohort object cohortObj = cohort() ### 2. Read VCF files, create VCF objects and organize them cohortObj.read_VCFs(inputPath) ### 3. 
Make a dictionary containing per tumor type the total number of retrotransposition events and the retrotransposition rate # Retrotransposition rate defined as the average number of retrotransposition events for the donors in a given tumor rtTumorTypeDict = {} totalNbEvents = 0 totalNbL1Events = 0 ## For each tumor type for projectCode in cohortObj.VCFdict: ## Initialize category counts rtTumorTypeDict[projectCode] = {} nbEvents = 0 nbDonors = 0 ## For each donor for VCFObj in cohortObj.VCFdict[projectCode]: nbDonors += 1 # For each MEI for MEIObj in VCFObj.lineList: ## Select only those MEI that pass all the filters: if (MEIObj.filter == "PASS"): totalNbEvents += 1 nbEvents += 1 ## Select only L1 events: # - TD1 (partnered_transductions) # - TD2 (orphan_transductions) # - TD0 L1 (solo-L1) if (MEIObj.infoDict["TYPE"] == "TD1") or (MEIObj.infoDict["TYPE"] == "TD2") or ((MEIObj.infoDict["TYPE"] == "TD0") and (MEIObj.infoDict["CLASS"]=="L1")): totalNbL1Events += 1 # Retrotransposition rate (average number of events per donor): rtRate = float(nbEvents) / float(nbDonors) # Save into dictionary: rtTumorTypeDict[projectCode]["nbDonors"] = nbDonors rtTumorTypeDict[projectCode]["nbEvents"] = nbEvents rtTumorTypeDict[projectCode]["rtRate"] = rtRate print "totalNbEvents: ", totalNbEvents print "totalNbL1Events: ", totalNbL1Events print "rtTumorTypeDict: ", rtTumorTypeDict ### 4. Make dataframe with the info gathered in 3 # nbDonors nbEvents rtRate # ProjectCode1 X1 Y1 Z1 # ProjectCode2 X2 Y2 Z2 # ... # ProjectCodeN rtTumorTypeDataFrame = pd.DataFrame(rtTumorTypeDict).transpose() # Save output into tsv outFilePath = outDir + '/nbDonors_nbEvents_rtRate_perTumorType.tsv' rtTumorTypeDataFrame.to_csv(outFilePath, sep='\t') ## End ## print print "***** Finished! *****" print
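For illustration, a toy stand-in for the dictionary built above and the resulting per-tumor-type table; the tumor codes and counts here are made up, the real values come from the parsed VCFs.

import pandas as pd

toy_dict = {
    'ESAD': {'nbDonors': 10, 'nbEvents': 250, 'rtRate': 25.0},
    'PRAD': {'nbDonors': 20, 'nbEvents': 40,  'rtRate': 2.0},
}
df = pd.DataFrame(toy_dict).transpose()
print(df)
#       nbDonors  nbEvents  rtRate
# ESAD      10.0     250.0    25.0
# PRAD      20.0      40.0     2.0   (exact dtypes may vary by pandas version)
df.to_csv('nbDonors_nbEvents_rtRate_perTumorType.tsv', sep='\t')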
gpl-3.0
maniteja123/scipy
scipy/special/c_misc/struve_convergence.py
23
3678
""" Convergence regions of the expansions used in ``struve.c`` Note that for v >> z both functions tend rapidly to 0, and for v << -z, they tend to infinity. The floating-point functions over/underflow in the lower left and right corners of the figure. Figure legend ============= Red region Power series is close (1e-12) to the mpmath result Blue region Asymptotic series is close to the mpmath result Green region Bessel series is close to the mpmath result Dotted colored lines Boundaries of the regions Solid colored lines Boundaries estimated by the routine itself. These will be used for determining which of the results to use. Black dashed line The line z = 0.7*|v| + 12 """ from __future__ import absolute_import, division, print_function import numpy as np import matplotlib.pyplot as plt import mpmath def err_metric(a, b, atol=1e-290): m = abs(a - b) / (atol + abs(b)) m[np.isinf(b) & (a == b)] = 0 return m def do_plot(is_h=True): from scipy.special._ufuncs import \ _struve_power_series, _struve_asymp_large_z, _struve_bessel_series vs = np.linspace(-1000, 1000, 91) zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]]) rp = _struve_power_series(vs[:,None], zs[None,:], is_h) ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h) rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h) mpmath.mp.dps = 50 if is_h: sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z))) else: sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z))) ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:]) err_a = err_metric(ra[0], ex) + 1e-300 err_p = err_metric(rp[0], ex) + 1e-300 err_b = err_metric(rb[0], ex) + 1e-300 err_est_a = abs(ra[1]/ra[0]) err_est_p = abs(rp[1]/rp[0]) err_est_b = abs(rb[1]/rb[0]) z_cutoff = 0.7*abs(vs) + 12 levels = [-1000, -12] plt.cla() plt.hold(1) plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1) plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1) plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1) plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':']) plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':']) plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':']) lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-']) la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-']) lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-']) plt.clabel(lp, fmt={-1000: 'P', -12: 'P'}) plt.clabel(la, fmt={-1000: 'A', -12: 'A'}) plt.clabel(lb, fmt={-1000: 'B', -12: 'B'}) plt.plot(vs, z_cutoff, 'k--') plt.xlim(vs.min(), vs.max()) plt.ylim(zs.min(), zs.max()) plt.xlabel('v') plt.ylabel('z') def main(): plt.clf() plt.subplot(121) do_plot(True) plt.title('Struve H') plt.subplot(122) do_plot(False) plt.title('Struve L') plt.savefig('struve_convergence.png') plt.show() if __name__ == "__main__": import os import sys if '--main' in sys.argv: main() else: import subprocess subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'), '-g', '--python', __file__, '--main'])
bsd-3-clause
bsipocz/scikit-image
doc/examples/plot_brief.py
32
1879
""" ======================= BRIEF binary descriptor ======================= This example demonstrates the BRIEF binary description algorithm. The descriptor consists of relatively few bits and can be computed using a set of intensity difference tests. The short binary descriptor results in low memory footprint and very efficient matching based on the Hamming distance metric. BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by detecting and extracting features at different scales. """ from skimage import data from skimage import transform as tf from skimage.feature import (match_descriptors, corner_peaks, corner_harris, plot_matches, BRIEF) from skimage.color import rgb2gray import matplotlib.pyplot as plt img1 = rgb2gray(data.astronaut()) tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100)) img2 = tf.warp(img1, tform) img3 = tf.rotate(img1, 25) keypoints1 = corner_peaks(corner_harris(img1), min_distance=5) keypoints2 = corner_peaks(corner_harris(img2), min_distance=5) keypoints3 = corner_peaks(corner_harris(img3), min_distance=5) extractor = BRIEF() extractor.extract(img1, keypoints1) keypoints1 = keypoints1[extractor.mask] descriptors1 = extractor.descriptors extractor.extract(img2, keypoints2) keypoints2 = keypoints2[extractor.mask] descriptors2 = extractor.descriptors extractor.extract(img3, keypoints3) keypoints3 = keypoints3[extractor.mask] descriptors3 = extractor.descriptors matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True) matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True) fig, ax = plt.subplots(nrows=2, ncols=1) plt.gray() plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12) ax[0].axis('off') plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13) ax[1].axis('off') plt.show()
bsd-3-clause
arabenjamin/scikit-learn
examples/cross_decomposition/plot_compare_cross_decomposition.py
142
4761
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA ############################################################################### # Dataset based latent variables model n = 500 # 2 latents vars: l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X_train = X[:n / 2] Y_train = Y[:n / 2] X_test = X[n / 2:] Y_test = Y[n / 2:] print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) ############################################################################### # Canonical (symmetric) PLS # Transform data # ~~~~~~~~~~~~~~ plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ # 1) On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train") plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 1: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train") plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 2: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # 2) Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train") plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test") plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train") plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test") plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title('Y comp. 1 vs Y comp. 
2, (test corr = %.2f)' % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() ############################################################################### # PLS regression, with multivariate response, a.k.a. PLS2 n = 1000 q = 3 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noise Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coefs with B print("Estimated B") print(np.round(pls2.coefs, 1)) pls2.predict(X) ############################################################################### # PLS regression, with univariate response, a.k.a. PLS1 n = 1000 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of components exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coefs, 1)) ############################################################################### # CCA (PLS mode B with symmetric deflation) cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
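A condensed, seeded version of the latent-variable check this example performs: the first X and Y score components extracted by PLSCanonical should correlate strongly, while cross-component correlations stay weak. Values printed are illustrative, not exact.

import numpy as np
from sklearn.cross_decomposition import PLSCanonical

rng = np.random.RandomState(0)
n = 200
l1, l2 = rng.normal(size=n), rng.normal(size=n)           # two shared latent variables
X = np.c_[l1, l1, l2, l2] + rng.normal(size=(n, 4))
Y = np.c_[l1, l1, l2, l2] + rng.normal(size=(n, 4))

plsca = PLSCanonical(n_components=2).fit(X, Y)
Xr, Yr = plsca.transform(X, Y)
print(np.corrcoef(Xr[:, 0], Yr[:, 0])[0, 1])   # high: first components covary strongly
print(np.corrcoef(Xr[:, 0], Yr[:, 1])[0, 1])   # near zero: little cross-component correlation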
bsd-3-clause
DmitryUlyanov/Multicore-TSNE
MulticoreTSNE/examples/test.py
1
2679
import gzip import pickle import numpy as np import matplotlib from cycler import cycler import urllib import os import sys from MulticoreTSNE import MulticoreTSNE as TSNE matplotlib.use('Agg') import matplotlib.pyplot as plt import argparse parser = argparse.ArgumentParser() parser.add_argument("--n_jobs", help='Number of threads', default=1, type=int) parser.add_argument("--n_objects", help='How many objects to use from MNIST', default=-1, type=int) parser.add_argument("--n_components", help='T-SNE dimensionality', default=2, type=int) args = parser.parse_args() def get_mnist(): if not os.path.exists('mnist.pkl.gz'): print('downloading MNIST') if sys.version_info >= (3, 0): urllib.request.urlretrieve( 'http://deeplearning.net/data/mnist/mnist.pkl.gz', 'mnist.pkl.gz') else: urllib.urlretrieve( 'http://deeplearning.net/data/mnist/mnist.pkl.gz', 'mnist.pkl.gz') print('downloaded') f = gzip.open("mnist.pkl.gz", "rb") if sys.version_info >= (3, 0): train, val, test = pickle.load(f, encoding='latin1') else: train, val, test = pickle.load(f) f.close() # Get all data in one array _train = np.asarray(train[0], dtype=np.float64) _val = np.asarray(val[0], dtype=np.float64) _test = np.asarray(test[0], dtype=np.float64) mnist = np.vstack((_train, _val, _test)) # Also the classes, for labels in the plot later classes = np.hstack((train[1], val[1], test[1])) return mnist, classes def plot(Y, classes, name): digits = set(classes) fig = plt.figure() colormap = plt.cm.spectral plt.gca().set_prop_cycle( cycler('color', [colormap(i) for i in np.linspace(0, 0.9, 10)])) ax = fig.add_subplot(111) labels = [] for d in digits: idx = classes == d if Y.shape[1] == 1: ax.plot(Y[idx], np.random.randn(Y[idx].shape[0]), 'o') else: ax.plot(Y[idx, 0], Y[idx, 1], 'o') labels.append(d) ax.legend(labels, numpoints=1, fancybox=True) fig.savefig(name) if Y.shape[1] > 2: print('Warning! Plot shows only first two components!') ################################################################ mnist, classes = get_mnist() if args.n_objects != -1: mnist = mnist[:args.n_objects] classes = classes[:args.n_objects] tsne = TSNE(n_jobs=int(args.n_jobs), verbose=1, n_components=args.n_components, random_state=660) mnist_tsne = tsne.fit_transform(mnist) filename = 'mnist_tsne_n_comp=%d.png' % args.n_components plot(mnist_tsne, classes, filename) print('Plot saved to %s' % filename)
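A minimal, self-contained use of the MulticoreTSNE API exercised by this script, with random data standing in for MNIST; the constructor arguments mirror the ones used above.

import numpy as np
from MulticoreTSNE import MulticoreTSNE as TSNE

X = np.random.RandomState(0).randn(500, 50).astype(np.float64)
embedding = TSNE(n_jobs=4, n_components=2, random_state=660).fit_transform(X)
print(embedding.shape)   # (500, 2)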
bsd-3-clause
mlyundin/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
127
7477
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
ntucllab/striatum
simulation/simulation_exp4p.py
1
3289
import six from six.moves import range, zip from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.multiclass import OneVsRestClassifier import numpy as np import matplotlib.pyplot as plt from striatum.storage import MemoryHistoryStorage, MemoryModelStorage from striatum.bandit import Exp4P from striatum.bandit.bandit import Action from striatum import simulation def train_expert(history_context, history_action): n_round = len(history_context) history_context = np.array([history_context[t] for t in range(n_round)]) history_action = np.array([history_action[t] for t in range(n_round)]) logreg = OneVsRestClassifier(LogisticRegression()) mnb = OneVsRestClassifier(MultinomialNB()) logreg.fit(history_context, history_action) mnb.fit(history_context, history_action) return [logreg, mnb] def get_advice(context, action_ids, experts): advice = {} for t, context_t in six.viewitems(context): advice[t] = {} for exp_i, expert in enumerate(experts): prob = expert.predict_proba(context_t[np.newaxis, :])[0] advice[t][exp_i] = {} for action_id, action_prob in zip(action_ids, prob): advice[t][exp_i][action_id] = action_prob return advice def main(): # pylint: disable=too-many-locals n_rounds = 1000 context_dimension = 5 actions = [Action(i) for i in range(5)] action_ids = [0, 1, 2, 3, 4] context1, desired_actions1 = simulation.simulate_data( 3000, context_dimension, actions, "Exp4P", random_state=0) experts = train_expert(context1, desired_actions1) # Parameter tuning tuning_region = np.arange(0.01, 1, 0.05) ctr_tuning = np.empty(len(tuning_region)) advice1 = get_advice(context1, action_ids, experts) for delta_i, delta in enumerate(tuning_region): historystorage = MemoryHistoryStorage() modelstorage = MemoryModelStorage() policy = Exp4P(actions, historystorage, modelstorage, delta=delta, p_min=None) cum_regret = simulation.evaluate_policy(policy, advice1, desired_actions1) ctr_tuning[delta_i] = n_rounds - cum_regret[-1] ctr_tuning /= n_rounds delta_opt = tuning_region[np.argmax(ctr_tuning)] simulation.plot_tuning_curve(tuning_region, ctr_tuning, label="delta changes") # Regret Analysis n_rounds = 10000 context2, desired_actions2 = simulation.simulate_data( n_rounds, context_dimension, actions, "Exp4P", random_state=1) advice2 = get_advice(context2, action_ids, experts) historystorage = MemoryHistoryStorage() modelstorage = MemoryModelStorage() policy = Exp4P(actions, historystorage, modelstorage, delta=delta_opt, p_min=None) for t in range(n_rounds): history_id, action = policy.get_action(advice2[t], 1) action_id = action[0]['action'].action_id if desired_actions2[t] != action_id: policy.reward(history_id, {action_id: 0}) else: policy.reward(history_id, {action_id: 1}) policy.plot_avg_regret() plt.show() if __name__ == '__main__': main()
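The nested advice dictionary produced by get_advice above maps round -> expert index -> action_id -> probability. A toy instance with two experts and three actions (the probabilities are made-up numbers) looks like this:

# Hypothetical advice for a single round t = 0, two experts, actions 0-2;
# each expert's probabilities over the actions sum to 1.
advice = {
    0: {
        0: {0: 0.7, 1: 0.2, 2: 0.1},   # expert 0 (e.g. the logistic regression)
        1: {0: 0.3, 1: 0.4, 2: 0.3},   # expert 1 (e.g. the naive Bayes)
    }
}
assert abs(sum(advice[0][0].values()) - 1.0) < 1e-9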
bsd-2-clause
tsurumeso/pysparcl
pysparcl/cluster.py
1
3666
import numpy as np import six from sklearn.cluster import KMeans from pysparcl import subfunc from pysparcl import utils def kmeans(x, k=None, wbounds=None, n_init=20, max_iter=6, centers=None, verbose=False): n, p = x.shape if k is None and centers is None: raise ValueError('k and centers are None.') if k is not None and centers is not None: if centers.shape[0] != k or centers.shape[1] != p: raise ValueError('Invalid shape of centers.') if wbounds is None: wbounds = np.linspace(1.1, np.sqrt(p), 20) if np.isscalar(wbounds): wbounds = np.asarray([wbounds]) if wbounds.min() <= 1: raise ValueError('Each wbound must be > 1.') if centers is not None: cs = KMeans(centers.shape[0], init=centers, n_init=1).fit(x).labels_ else: cs = KMeans(k, init='random', n_init=n_init).fit(x).labels_ out = [] for i in range(len(wbounds)): ws = np.ones(p) * (1 / np.sqrt(p)) ws_old = np.random.standard_normal(p) bcss_ws = None niter = 0 while (np.sum(np.abs(ws - ws_old)) / np.sum(np.abs(ws_old)) > 1e-4 and niter < max_iter): niter += 1 ws_old = ws if niter > 1: if k is not None: cs = subfunc._update_cs(x, k, ws, cs) else: cs = subfunc._update_cs(x, centers.shape[0], ws, cs) ws = subfunc._update_ws(x, cs, wbounds[i]) bcss_ws = np.sum(subfunc._get_wcss(x, cs)[1] * ws) result = {'ws': ws, 'cs': cs, 'bcss_ws': bcss_ws, 'wbound': wbounds[i]} out.append(result) if verbose: six.print_('*-------------------------------------------------*') six.print_('iter:', i + 1) six.print_('wbound:', wbounds[i]) six.print_('number of non-zero weights:', np.count_nonzero(ws)) six.print_('sum of weights:', np.sum(ws), flush=True) return out def permute(x, k=None, nperms=25, wbounds=None, nvals=10, centers=None, verbose=False): n, p = x.shape if k is None and centers is None: raise ValueError('k and centers are None.') if k is not None and centers is not None: if centers.shape[0] != k or centers.shape[1] != p: raise ValueError('Invalid shape of centers.') if wbounds is None: wbounds = np.exp( np.linspace(np.log(1.2), np.log(np.sqrt(p) * 0.9), nvals)) if wbounds.min() <= 1 or len(wbounds) < 2: raise ValueError('len(wbounds) and each wbound must be > 1.') permx = np.zeros((nperms, n, p)) nnonzerows = None for i in range(nperms): for j in range(p): permx[i, :, j] = np.random.permutation(x[:, j]) tots = None out = kmeans(x, k, wbounds, centers=centers, verbose=verbose) for i in range(len(out)): nnonzerows = utils._cbind(nnonzerows, np.sum(out[i]['ws'] != 0)) bcss = subfunc._get_wcss(x, out[i]['cs'])[1] tots = utils._cbind(tots, np.sum(out[i]['ws'] * bcss)) permtots = np.zeros((len(wbounds), nperms)) for i in range(nperms): perm_out = kmeans( permx[i], k, wbounds, centers=centers, verbose=verbose) for j in range(len(perm_out)): perm_bcss = subfunc._get_wcss(permx[i], perm_out[j]['cs'])[1] permtots[j, i] = np.sum(perm_out[j]['ws'] * perm_bcss) gaps = np.log(tots) - np.log(permtots).mean(axis=1) bestw = wbounds[gaps.argmax()] out = {'bestw': bestw, 'gaps': gaps, 'wbounds': wbounds, 'nnonzerows': nnonzerows} return out
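A small usage sketch based on the signatures above: permute to select a weight bound via the gap statistic, then kmeans with the chosen wbound. It assumes this module is importable as pysparcl.cluster and that its internal subfunc/utils helpers are present; the data here is synthetic.

import numpy as np
from pysparcl import cluster

X = np.random.RandomState(0).randn(60, 20)
X[:30, :5] += 2.0                      # only the first 5 features carry the cluster signal

perm = cluster.permute(X, k=2, nperms=5, nvals=5)
best_wbound = perm['bestw']
out = cluster.kmeans(X, k=2, wbounds=best_wbound)
print(out[0]['cs'])                    # cluster assignments
print(np.count_nonzero(out[0]['ws']))  # number of features given non-zero weight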
gpl-2.0
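A minimal usage sketch for the sparse k-means module above, assuming pysparcl is importable as a package and using throw-away two-blob data purely for illustration: permute() selects the best weight bound via its gap statistic, and kmeans() is then rerun at that bound.

# Usage sketch only; the data and the chosen k=2 are illustrative assumptions.
import numpy as np
from pysparcl import cluster

rng = np.random.RandomState(0)
x = np.vstack([rng.randn(50, 20) + 2.0, rng.randn(50, 20) - 2.0])

perm = cluster.permute(x, k=2)                      # gap statistic over candidate wbounds
best = cluster.kmeans(x, k=2, wbounds=perm['bestw'])[0]
print(perm['bestw'], np.count_nonzero(best['ws']))  # chosen bound, number of active features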
davidgardenier/frbpoppy
tests/beam/theory.py
1
1311
"""Plot intensity profile of theoretical beam patterns.""" import matplotlib.pyplot as plt import numpy as np from frbpoppy.survey import Survey from tests.convenience import plot_aa_style, rel_path PATTERNS = ['perfect', 'gaussian', 'airy-0', 'airy-4'] SURVEY = 'wsrt-apertif' MIN_Y = 1e-6 n = 500000 plot_aa_style() for pattern in PATTERNS: n_sidelobes = 1 p = pattern z = 0 if pattern.startswith('perfect'): n_sidelobes = 0.5 if pattern.startswith('airy'): n_sidelobes = int(pattern[-1]) p = 'airy' if n_sidelobes == 0: z = 10 s = Survey(SURVEY) s.set_beam(model=p, n_sidelobes=n_sidelobes) int_pro, offset = s.calc_beam(shape=n) # Sort the values sorted_int = np.argsort(offset) int_pro = int_pro[sorted_int] offset = offset[sorted_int] # Clean up lower limit offset = offset[int_pro > MIN_Y] int_pro = int_pro[int_pro > MIN_Y] print(f'Beam size at FWHM: {s.beam_size_at_fwhm}') print(f'Beam size with {n_sidelobes} sidelobes: {s.beam_size}') plt.plot(offset, int_pro, label=pattern, zorder=z) plt.xlabel(r'Offset ($^{\circ}$)') plt.ylabel('Intensity Profile') plt.yscale('log') plt.legend(loc='upper right') plt.tight_layout() plt.savefig(rel_path('plots/beam_int_theory.pdf'))
mit
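The 'airy' beam models compared in the script above presumably follow the standard Airy-disk form I(x) = (2 J1(x)/x)^2. Under that reading, the underlying intensity profile can be sketched standalone with SciPy, without frbpoppy; the connection to frbpoppy's internal beam model is an assumption here, not something the stored file states.

# Standalone sketch of the classical Airy-disk intensity profile.
import numpy as np
from scipy.special import j1

x = np.linspace(1e-6, 20, 2000)             # avoid the removable singularity at x = 0
intensity = (2 * j1(x) / x) ** 2
fwhm_x = x[np.argmin(np.abs(intensity - 0.5))]
print(f'Half power reached near x = {fwhm_x:.2f}')  # ~1.62 for an Airy disk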
krikru/tensorflow-opencl
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
6
64222
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for estimators.linear.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import json import sys import tempfile # TODO: #6568 Remove this hack that makes dlopen() not crash. if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) import numpy as np from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.learn.python.learn import experiment from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.training import ftrl from tensorflow.python.training import input as input_lib from tensorflow.python.training import server_lib def _prepare_iris_data_for_logistic_regression(): # Converts iris data to a logistic regression problem. 
iris = base.load_iris() ids = np.where((iris.target == 0) | (iris.target == 1)) iris = base.Dataset(data=iris.data[ids], target=iris.target[ids]) return iris class LinearClassifierTest(test.TestCase): def testExperimentIntegration(self): cont_features = [ feature_column_lib.real_valued_column( 'feature', dimension=4) ] exp = experiment.Experiment( estimator=linear.LinearClassifier( n_classes=3, feature_columns=cont_features), train_input_fn=test_data.iris_input_multiclass_fn, eval_input_fn=test_data.iris_input_multiclass_fn) exp.test() def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract(self, linear.LinearClassifier) def testTrain(self): """Tests that loss goes down with training.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier(feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) self.assertLess(loss2, 0.01) def testJointTrain(self): """Tests that loss goes down with training with joint weights.""" def input_fn(): return { 'age': sparse_tensor.SparseTensor( values=['1'], indices=[[0, 0]], dense_shape=[1, 1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.sparse_column_with_hash_bucket('age', 2) classifier = linear.LinearClassifier( _joint_weight=True, feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) self.assertLess(loss2, 0.01) def testMultiClass_MatrixData(self): """Tests multi-class classification using matrix data as input.""" feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier( n_classes=3, feature_columns=[feature_column]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) self.assertGreater(scores['accuracy'], 0.9) def testMultiClass_MatrixData_Labels1D(self): """Same as the last test, but labels shape is [150] instead of [150, 1].""" def _input_fn(): iris = base.load_iris() return { 'feature': constant_op.constant( iris.data, dtype=dtypes.float32) }, constant_op.constant( iris.target, shape=[150], dtype=dtypes.int32) feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier( n_classes=3, feature_columns=[feature_column]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testMultiClass_NpMatrixData(self): """Tests multi-class classification using numpy matrix data as input.""" iris = base.load_iris() train_x = iris.data train_y = iris.target feature_column = 
feature_column_lib.real_valued_column('', dimension=4) classifier = linear.LinearClassifier( n_classes=3, feature_columns=[feature_column]) classifier.fit(x=train_x, y=train_y, steps=100) scores = classifier.evaluate(x=train_x, y=train_y, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testLogisticRegression_MatrixData(self): """Tests binary classification using matrix data as input.""" def _input_fn(): iris = _prepare_iris_data_for_logistic_regression() return { 'feature': constant_op.constant( iris.data, dtype=dtypes.float32) }, constant_op.constant( iris.target, shape=[100, 1], dtype=dtypes.int32) feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier(feature_columns=[feature_column]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testLogisticRegression_MatrixData_Labels1D(self): """Same as the last test, but labels shape is [100] instead of [100, 1].""" def _input_fn(): iris = _prepare_iris_data_for_logistic_regression() return { 'feature': constant_op.constant( iris.data, dtype=dtypes.float32) }, constant_op.constant( iris.target, shape=[100], dtype=dtypes.int32) feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier(feature_columns=[feature_column]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testLogisticRegression_NpMatrixData(self): """Tests binary classification using numpy matrix data as input.""" iris = _prepare_iris_data_for_logistic_regression() train_x = iris.data train_y = iris.target feature_columns = [feature_column_lib.real_valued_column('', dimension=4)] classifier = linear.LinearClassifier(feature_columns=feature_columns) classifier.fit(x=train_x, y=train_y, steps=100) scores = classifier.evaluate(x=train_x, y=train_y, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testWeightAndBiasNames(self): """Tests that weight and bias names haven't changed.""" feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier( n_classes=3, feature_columns=[feature_column]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) self.assertEqual(4, len(classifier.weights_)) self.assertEqual(3, len(classifier.bias_)) def testCustomOptimizerByObject(self): """Tests multi-class classification using matrix data as input.""" feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier( n_classes=3, optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), feature_columns=[feature_column]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) self.assertGreater(scores['accuracy'], 0.9) def testCustomOptimizerByString(self): """Tests multi-class classification using matrix data as input.""" feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) def _optimizer(): return ftrl.FtrlOptimizer(learning_rate=0.1) classifier = linear.LinearClassifier( n_classes=3, optimizer=_optimizer, feature_columns=[feature_column]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) self.assertGreater(scores['accuracy'], 
0.9) def testCustomOptimizerByFunction(self): """Tests multi-class classification using matrix data as input.""" feature_column = feature_column_lib.real_valued_column( 'feature', dimension=4) classifier = linear.LinearClassifier( n_classes=3, optimizer='Ftrl', feature_columns=[feature_column]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) self.assertGreater(scores['accuracy'], 0.9) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): # For the case of binary classification, the 2nd column of "predictions" # denotes the model predictions. predictions = array_ops.strided_slice( predictions, [0, 1], [-1, 2], end_mask=1) return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) classifier = linear.LinearClassifier( feature_columns=[feature_column_lib.real_valued_column('x')]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'my_accuracy': MetricSpec( metric_fn=metric_ops.streaming_accuracy, prediction_key='classes'), 'my_precision': MetricSpec( metric_fn=metric_ops.streaming_precision, prediction_key='classes'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='probabilities') }) self.assertTrue( set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset( set(scores.keys()))) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(classifier.predict_classes( input_fn=predict_input_fn))) self.assertEqual( _sklearn.accuracy_score([1, 0, 0, 0], predictions), scores['my_accuracy']) # Tests the case where the prediction_key is neither "classes" nor # "probabilities". with self.assertRaisesRegexp(KeyError, 'bad_type'): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) # Tests the case where the 2nd element of the key is neither "classes" nor # "probabilities". with self.assertRaises(KeyError): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc}) # Tests the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ ('bad_length_name', 'classes', 'bad_length'): metric_ops.streaming_accuracy }) def testLogisticFractionalLabels(self): """Tests logistic training with fractional labels.""" def input_fn(num_epochs=None): return { 'age': input_lib.limit_epochs( constant_op.constant([[1], [2]]), num_epochs=num_epochs), }, constant_op.constant( [[.7], [0]], dtype=dtypes.float32) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier( feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=input_fn, steps=500) predict_input_fn = functools.partial(input_fn, num_epochs=1) predictions_proba = list( classifier.predict_proba(input_fn=predict_input_fn)) # Prediction probabilities mirror the labels column, which proves that the # classifier learns from float input. 
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(): features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant([[1], [0], [0]]) return features, labels sparse_features = [ # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) ] tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. config._cluster_spec = server_lib.ClusterSpec({}) classifier = linear.LinearClassifier( feature_columns=sparse_features, config=config) classifier.fit(input_fn=_input_fn, steps=200) loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss'] self.assertLess(loss, 0.07) def testTrainSaveLoad(self): """Tests that insures you can save and reload a trained model.""" def input_fn(num_epochs=None): return { 'age': input_lib.limit_epochs( constant_op.constant([1]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]), }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') model_dir = tempfile.mkdtemp() classifier = linear.LinearClassifier( model_dir=model_dir, feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=30) predict_input_fn = functools.partial(input_fn, num_epochs=1) out1_class = list( classifier.predict_classes( input_fn=predict_input_fn, as_iterable=True)) out1_proba = list( classifier.predict_proba( input_fn=predict_input_fn, as_iterable=True)) del classifier classifier2 = linear.LinearClassifier( model_dir=model_dir, feature_columns=[age, language]) out2_class = list( classifier2.predict_classes( input_fn=predict_input_fn, as_iterable=True)) out2_proba = list( classifier2.predict_proba( input_fn=predict_input_fn, as_iterable=True)) self.assertTrue(np.array_equal(out1_class, out2_class)) self.assertTrue(np.array_equal(out1_proba, out2_proba)) def testWeightColumn(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. 
labels = constant_op.constant([[1], [0], [0], [0]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x) labels = constant_op.constant([[1], [1], [1], [1]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels classifier = linear.LinearClassifier( weight_column_name='w', feature_columns=[feature_column_lib.real_valued_column('x')], config=run_config.RunConfig(tf_random_seed=3)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1) # All examples in eval data set are y=x. self.assertGreater(scores['labels/actual_label_mean'], 0.9) # If there were no weight column, model would learn y=Not(x). Because of # weights, it learns y=x. self.assertGreater(scores['labels/prediction_mean'], 0.9) # All examples in eval data set are y=x. So if weight column were ignored, # then accuracy would be zero. Because of weights, accuracy should be close # to 1.0. self.assertGreater(scores['accuracy'], 0.9) scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1) # Considering weights, the mean label should be close to 1.0. # If weights were ignored, it would be 0.25. self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9) # The classifier has learned y=x. If weight column were ignored in # evaluation, then accuracy for the train set would be 0.25. # Because weight is not ignored, accuracy is greater than 0.6. self.assertGreater(scores_train_set['accuracy'], 0.6) def testWeightColumnLoss(self): """Test ensures that you can specify per-example weights for loss.""" def _input_fn(): features = { 'age': constant_op.constant([[20], [20], [20]]), 'weights': constant_op.constant([[100], [1], [1]]), } labels = constant_op.constant([[1], [0], [0]]) return features, labels age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier(feature_columns=[age]) classifier.fit(input_fn=_input_fn, steps=100) loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss'] classifier = linear.LinearClassifier( feature_columns=[age], weight_column_name='weights') classifier.fit(input_fn=_input_fn, steps=100) loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss'] self.assertLess(loss_weighted, loss_unweighted) def testExport(self): """Tests that export model for servo works.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier(feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) export_dir = tempfile.mkdtemp() classifier.export(export_dir) def testDisableCenteredBias(self): """Tests that we can disable centered bias.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier( feature_columns=[age, language], enable_centered_bias=False) 
classifier.fit(input_fn=input_fn, steps=100) self.assertNotIn('centered_bias_weight', classifier.get_variable_names()) def testEnableCenteredBias(self): """Tests that we can disable centered bias.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier( feature_columns=[age, language], enable_centered_bias=True) classifier.fit(input_fn=input_fn, steps=100) self.assertIn('centered_bias_weight', classifier.get_variable_names()) def testTrainOptimizerWithL1Reg(self): """Tests l1 regularized model has higher loss.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) classifier_no_reg = linear.LinearClassifier(feature_columns=[language]) classifier_with_reg = linear.LinearClassifier( feature_columns=[language], optimizer=ftrl.FtrlOptimizer( learning_rate=1.0, l1_regularization_strength=100.)) loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate( input_fn=input_fn, steps=1)['loss'] loss_with_reg = classifier_with_reg.fit(input_fn=input_fn, steps=100).evaluate( input_fn=input_fn, steps=1)['loss'] self.assertLess(loss_no_reg, loss_with_reg) def testTrainWithMissingFeature(self): """Tests that training works with missing features.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['Swahili', 'turkish'], indices=[[0, 0], [2, 0]], dense_shape=[3, 1]) }, constant_op.constant([[1], [1], [1]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) classifier = linear.LinearClassifier(feature_columns=[language]) classifier.fit(input_fn=input_fn, steps=100) loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss, 0.07) def testSdcaOptimizerRealValuedFeatures(self): """Tests LinearClasssifier with SDCAOptimizer and real valued features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2']), 'maintenance_cost': constant_op.constant([[500.0], [200.0]]), 'sq_footage': constant_op.constant([[800.0], [600.0]]), 'weights': constant_op.constant([[1.0], [1.0]]) }, constant_op.constant([[0], [1]]) maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost') sq_footage = feature_column_lib.real_valued_column('sq_footage') sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[maintenance_cost, sq_footage], weight_column_name='weights', optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=100) loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss, 0.05) def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self): """Tests SDCAOptimizer with real valued features of higher dimension.""" # input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures # where 2 1-dimensional dense features have been replaced by 1 2-dimensional # feature. 
def input_fn(): return { 'example_id': constant_op.constant(['1', '2']), 'dense_feature': constant_op.constant([[500.0, 800.0], [200.0, 600.0]]) }, constant_op.constant([[0], [1]]) dense_feature = feature_column_lib.real_valued_column( 'dense_feature', dimension=2) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[dense_feature], optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=100) loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss, 0.05) def testSdcaOptimizerBucketizedFeatures(self): """Tests LinearClasssifier with SDCAOptimizer and bucketized features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': constant_op.constant([[600.0], [1000.0], [400.0]]), 'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]), 'weights': constant_op.constant([[1.0], [1.0], [1.0]]) }, constant_op.constant([[1], [0], [1]]) price_bucket = feature_column_lib.bucketized_column( feature_column_lib.real_valued_column('price'), boundaries=[500.0, 700.0]) sq_footage_bucket = feature_column_lib.bucketized_column( feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0]) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id', symmetric_l2_regularization=1.0) classifier = linear.LinearClassifier( feature_columns=[price_bucket, sq_footage_bucket], weight_column_name='weights', optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=50) scores = classifier.evaluate(input_fn=input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testSdcaOptimizerSparseFeatures(self): """Tests LinearClasssifier with SDCAOptimizer and sparse features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': constant_op.constant([[0.4], [0.6], [0.3]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), 'weights': constant_op.constant([[1.0], [1.0], [1.0]]) }, constant_op.constant([[1], [0], [1]]) price = feature_column_lib.real_valued_column('price') country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[price, country], weight_column_name='weights', optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=50) scores = classifier.evaluate(input_fn=input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testSdcaOptimizerWeightedSparseFeatures(self): """LinearClasssifier with SDCAOptimizer and weighted sparse features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': sparse_tensor.SparseTensor( values=[2., 3., 1.], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 5]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 5]) }, constant_op.constant([[1], [0], [1]]) country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) country_weighted_by_price = feature_column_lib.weighted_sparse_column( country, 'price') sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=50) scores = 
classifier.evaluate(input_fn=input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testSdcaOptimizerCrossedFeatures(self): """Tests LinearClasssifier with SDCAOptimizer and crossed features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'language': sparse_tensor.SparseTensor( values=['english', 'italian', 'spanish'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), 'country': sparse_tensor.SparseTensor( values=['US', 'IT', 'MX'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]) }, constant_op.constant([[0], [0], [1]]) language = feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=5) country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) country_language = feature_column_lib.crossed_column( [language, country], hash_bucket_size=10) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[country_language], optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=10) scores = classifier.evaluate(input_fn=input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testSdcaOptimizerMixedFeatures(self): """Tests LinearClasssifier with SDCAOptimizer and a mix of features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': constant_op.constant([[0.6], [0.8], [0.3]]), 'sq_footage': constant_op.constant([[900.0], [700.0], [600.0]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), 'weights': constant_op.constant([[3.0], [1.0], [1.0]]) }, constant_op.constant([[1], [0], [1]]) price = feature_column_lib.real_valued_column('price') sq_footage_bucket = feature_column_lib.bucketized_column( feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0, 800.0]) country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) sq_footage_country = feature_column_lib.crossed_column( [sq_footage_bucket, country], hash_bucket_size=10) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') classifier = linear.LinearClassifier( feature_columns=[price, sq_footage_bucket, country, sq_footage_country], weight_column_name='weights', optimizer=sdca_optimizer) classifier.fit(input_fn=input_fn, steps=50) scores = classifier.evaluate(input_fn=input_fn, steps=1) self.assertGreater(scores['accuracy'], 0.9) def testEval(self): """Tests that eval produces correct metrics. """ def input_fn(): return { 'age': constant_op.constant([[1], [2]]), 'language': sparse_tensor.SparseTensor( values=['greek', 'chinese'], indices=[[0, 0], [1, 0]], dense_shape=[2, 1]), }, constant_op.constant([[1], [0]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearClassifier(feature_columns=[age, language]) # Evaluate on trained model classifier.fit(input_fn=input_fn, steps=100) classifier.evaluate(input_fn=input_fn, steps=1) # TODO(ispir): Enable accuracy check after resolving the randomness issue. 
# self.assertLess(evaluated_values['loss/mean'], 0.3) # self.assertGreater(evaluated_values['accuracy/mean'], .95) class LinearRegressorTest(test.TestCase): def testExperimentIntegration(self): cont_features = [ feature_column_lib.real_valued_column( 'feature', dimension=4) ] exp = experiment.Experiment( estimator=linear.LinearRegressor(feature_columns=cont_features), train_input_fn=test_data.iris_input_logistic_fn, eval_input_fn=test_data.iris_input_logistic_fn) exp.test() def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor) def testRegression(self): """Tests that loss goes down with training.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[10.]]) language = feature_column_lib.sparse_column_with_hash_bucket('language', 100) age = feature_column_lib.real_valued_column('age') classifier = linear.LinearRegressor(feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) self.assertLess(loss2, 0.5) def testRegression_MatrixData(self): """Tests regression using matrix data as input.""" cont_features = [ feature_column_lib.real_valued_column( 'feature', dimension=4) ] regressor = linear.LinearRegressor( feature_columns=cont_features, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = regressor.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=1) self.assertLess(scores['loss'], 0.2) def testRegression_TensorData(self): """Tests regression using tensor data as input.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant( [1.0, 0., 0.2], dtype=dtypes.float32) feature_columns = [ feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20), feature_column_lib.real_valued_column('age') ] regressor = linear.LinearRegressor( feature_columns=feature_columns, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertLess(scores['loss'], 0.2) def testLoss(self): """Tests loss calculation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels regressor = linear.LinearRegressor( feature_columns=[feature_column_lib.real_valued_column('x')], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_train, steps=1) # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875 self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1) def testLossWithWeights(self): """Tests loss calculation with weights.""" def _input_fn_train(): # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). 
labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels def _input_fn_eval(): # 4 rows, with different weights. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[7.], [1.], [1.], [1.]]) } return features, labels regressor = linear.LinearRegressor( weight_column_name='w', feature_columns=[feature_column_lib.real_valued_column('x')], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125 self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1) def testTrainWithWeights(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x) labels = constant_op.constant([[1.], [1.], [1.], [1.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels regressor = linear.LinearRegressor( weight_column_name='w', feature_columns=[feature_column_lib.real_valued_column('x')], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # The model should learn (y = x) because of the weights, so the loss should # be close to zero. 
self.assertLess(scores['loss'], 0.1) def testPredict_AsIterableFalse(self): """Tests predict method with as_iterable=False.""" labels = [1.0, 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) feature_columns = [ feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20), feature_column_lib.real_valued_column('age') ] regressor = linear.LinearRegressor( feature_columns=feature_columns, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertLess(scores['loss'], 0.1) predicted_scores = regressor.predict_scores( input_fn=_input_fn, as_iterable=False) self.assertAllClose(labels, predicted_scores, atol=0.1) predictions = regressor.predict(input_fn=_input_fn, as_iterable=False) self.assertAllClose(predicted_scores, predictions) def testPredict_AsIterable(self): """Tests predict method with as_iterable=True.""" labels = [1.0, 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) feature_columns = [ feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20), feature_column_lib.real_valued_column('age') ] regressor = linear.LinearRegressor( feature_columns=feature_columns, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertLess(scores['loss'], 0.1) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predicted_scores = list( regressor.predict_scores( input_fn=predict_input_fn, as_iterable=True)) self.assertAllClose(labels, predicted_scores, atol=0.1) predictions = list( regressor.predict( input_fn=predict_input_fn, as_iterable=True)) self.assertAllClose(predicted_scores, predictions) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) regressor = linear.LinearRegressor( feature_columns=[feature_column_lib.real_valued_column('x')], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'my_error': MetricSpec( metric_fn=metric_ops.streaming_mean_squared_error, prediction_key='scores'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='scores') }) self.assertIn('loss', set(scores.keys())) self.assertIn('my_error', set(scores.keys())) self.assertIn('my_metric', set(scores.keys())) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list( regressor.predict_scores(input_fn=predict_input_fn))) 
self.assertAlmostEqual( _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions), scores['my_error']) # Tests the case where the prediction_key is not "scores". with self.assertRaisesRegexp(KeyError, 'bad_type'): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) # Tests the case where the 2nd element of the key is not "scores". with self.assertRaises(KeyError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('my_error', 'predictions'): metric_ops.streaming_mean_squared_error }) # Tests the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('bad_length_name', 'scores', 'bad_length'): metric_ops.streaming_mean_squared_error }) def testTrainSaveLoad(self): """Tests that insures you can save and reload a trained model.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant( [1.0, 0., 0.2], dtype=dtypes.float32) feature_columns = [ feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20), feature_column_lib.real_valued_column('age') ] model_dir = tempfile.mkdtemp() regressor = linear.LinearRegressor( model_dir=model_dir, feature_columns=feature_columns, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = list(regressor.predict_scores(input_fn=predict_input_fn)) del regressor regressor2 = linear.LinearRegressor( model_dir=model_dir, feature_columns=feature_columns) predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn)) self.assertAllClose(predictions, predictions2) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant( [1.0, 0., 0.2], dtype=dtypes.float32) feature_columns = [ # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7), feature_column_lib.real_valued_column('age') ] tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig(tf_random_seed=1) # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) regressor = linear.LinearRegressor( feature_columns=feature_columns, config=config) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertLess(scores['loss'], 0.1) def testDisableCenteredBias(self): """Tests that we can disable centered bias.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant( [1.0, 0., 0.2], dtype=dtypes.float32) feature_columns = [ feature_column_lib.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20), feature_column_lib.real_valued_column('age') ] regressor = linear.LinearRegressor( feature_columns=feature_columns, enable_centered_bias=False, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertLess(scores['loss'], 0.1) def testRecoverWeights(self): rng = np.random.RandomState(67) n = 1000 n_weights = 10 bias = 2 x = rng.uniform(-1, 1, (n, n_weights)) weights = 10 * rng.randn(n_weights) y = np.dot(x, weights) y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01) feature_columns = estimator.infer_real_valued_columns_from_input(x) regressor = linear.LinearRegressor( feature_columns=feature_columns, optimizer=ftrl.FtrlOptimizer(learning_rate=0.8)) regressor.fit(x, y, batch_size=64, steps=2000) # Have to flatten weights since they come in (x, 1) shape. self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1) # TODO(ispir): Disable centered_bias. 
# assert abs(bias - regressor.bias_) < 0.1 def testSdcaOptimizerRealValuedLinearFeatures(self): """Tests LinearRegressor with SDCAOptimizer and real valued features.""" x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]] weights = [[3.0], [-1.2], [0.5]] y = np.dot(x, weights) def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'x': constant_op.constant(x), 'weights': constant_op.constant([[10.0], [10.0], [10.0]]) }, constant_op.constant(y) x_column = feature_column_lib.real_valued_column('x', dimension=3) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') regressor = linear.LinearRegressor( feature_columns=[x_column], weight_column_name='weights', optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=20) loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss, 0.01) self.assertAllClose( [w[0] for w in weights], regressor.weights_.flatten(), rtol=0.1) def testSdcaOptimizerMixedFeaturesArbitraryWeights(self): """Tests LinearRegressor with SDCAOptimizer and a mix of features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': constant_op.constant([[0.6], [0.8], [0.3]]), 'sq_footage': constant_op.constant([[900.0], [700.0], [600.0]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), 'weights': constant_op.constant([[3.0], [5.0], [7.0]]) }, constant_op.constant([[1.55], [-1.25], [-3.0]]) price = feature_column_lib.real_valued_column('price') sq_footage_bucket = feature_column_lib.bucketized_column( feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0, 800.0]) country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) sq_footage_country = feature_column_lib.crossed_column( [sq_footage_bucket, country], hash_bucket_size=10) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id', symmetric_l2_regularization=1.0) regressor = linear.LinearRegressor( feature_columns=[price, sq_footage_bucket, country, sq_footage_country], weight_column_name='weights', optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=20) loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss, 0.05) def testSdcaOptimizerSparseFeaturesWithL1Reg(self): """Tests LinearClasssifier with SDCAOptimizer and sparse features.""" def input_fn(): return { 'example_id': constant_op.constant(['1', '2', '3']), 'price': constant_op.constant([[0.4], [0.6], [0.3]]), 'country': sparse_tensor.SparseTensor( values=['IT', 'US', 'GB'], indices=[[0, 0], [1, 3], [2, 1]], dense_shape=[3, 5]), 'weights': constant_op.constant([[10.0], [10.0], [10.0]]) }, constant_op.constant([[1.4], [-0.8], [2.6]]) price = feature_column_lib.real_valued_column('price') country = feature_column_lib.sparse_column_with_hash_bucket( 'country', hash_bucket_size=5) # Regressor with no L1 regularization. sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') regressor = linear.LinearRegressor( feature_columns=[price, country], weight_column_name='weights', optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=20) no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss'] no_l1_reg_weights = regressor.weights_ # Regressor with L1 regularization. 
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id', symmetric_l1_regularization=1.0) regressor = linear.LinearRegressor( feature_columns=[price, country], weight_column_name='weights', optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=20) l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss'] l1_reg_weights = regressor.weights_ # Unregularized loss is lower when there is no L1 regularization. self.assertLess(no_l1_reg_loss, l1_reg_loss) self.assertLess(no_l1_reg_loss, 0.05) # But weights returned by the regressor with L1 regularization have smaller # L1 norm. l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0 for var_name in sorted(l1_reg_weights): l1_reg_weights_norm += sum( np.absolute(l1_reg_weights[var_name].flatten())) no_l1_reg_weights_norm += sum( np.absolute(no_l1_reg_weights[var_name].flatten())) print('Var name: %s, value: %s' % (var_name, no_l1_reg_weights[var_name].flatten())) self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm) def testSdcaOptimizerBiasOnly(self): """Tests LinearClasssifier with SDCAOptimizer and validates bias weight.""" def input_fn(): """Testing the bias weight when it's the only feature present. All of the instances in this input only have the bias feature, and a 1/4 of the labels are positive. This means that the expected weight for the bias should be close to the average prediction, i.e 0.25. Returns: Training data for the test. """ num_examples = 40 return { 'example_id': constant_op.constant([str(x + 1) for x in range(num_examples)]), # place_holder is an empty column which is always 0 (absent), because # LinearClassifier requires at least one column. 'place_holder': constant_op.constant([[0.0]] * num_examples), }, constant_op.constant( [[1 if i % 4 is 0 else 0] for i in range(num_examples)]) place_holder = feature_column_lib.real_valued_column('place_holder') sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') regressor = linear.LinearRegressor( feature_columns=[place_holder], optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=100) self.assertNear( regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1) def testSdcaOptimizerBiasAndOtherColumns(self): """Tests LinearClasssifier with SDCAOptimizer and validates bias weight.""" def input_fn(): """Testing the bias weight when there are other features present. 1/2 of the instances in this input have feature 'a', the rest have feature 'b', and we expect the bias to be added to each instance as well. 0.4 of all instances that have feature 'a' are positive, and 0.2 of all instances that have feature 'b' are positive. The labels in the dataset are ordered to appear shuffled since SDCA expects shuffled data, and converges faster with this pseudo-random ordering. If the bias was centered we would expect the weights to be: bias: 0.3 a: 0.1 b: -0.1 Until b/29339026 is resolved, the bias gets regularized with the same global value for the other columns, and so the expected weights get shifted and are: bias: 0.2 a: 0.2 b: 0.0 Returns: The test dataset. 
""" num_examples = 200 half = int(num_examples / 2) return { 'example_id': constant_op.constant([str(x + 1) for x in range(num_examples)]), 'a': constant_op.constant([[1]] * int(half) + [[0]] * int(half)), 'b': constant_op.constant([[0]] * int(half) + [[1]] * int(half)), }, constant_op.constant( [[x] for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) + [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)]) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') regressor = linear.LinearRegressor( feature_columns=[ feature_column_lib.real_valued_column('a'), feature_column_lib.real_valued_column('b') ], optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=200) # TODO(b/29339026): Change the expected results to expect a centered bias. self.assertNear( regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05) self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05) self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05) def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self): """Tests LinearClasssifier with SDCAOptimizer and validates bias weight.""" def input_fn(): """Testing the bias weight when there are other features present. 1/2 of the instances in this input have feature 'a', the rest have feature 'b', and we expect the bias to be added to each instance as well. 0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of all instances that have feature 'b' have a label of -1. We can expect the weights to be: bias: 0.0 a: 0.1 b: -0.1 Returns: The test dataset. """ num_examples = 200 half = int(num_examples / 2) return { 'example_id': constant_op.constant([str(x + 1) for x in range(num_examples)]), 'a': constant_op.constant([[1]] * int(half) + [[0]] * int(half)), 'b': constant_op.constant([[0]] * int(half) + [[1]] * int(half)), }, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] + [[-1 if x % 10 == 0 else 0] for x in range(half)]) sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer( example_id_column='example_id') regressor = linear.LinearRegressor( feature_columns=[ feature_column_lib.real_valued_column('a'), feature_column_lib.real_valued_column('b') ], optimizer=sdca_optimizer) regressor.fit(input_fn=input_fn, steps=100) self.assertNear( regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05) self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05) self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05) def boston_input_fn(): boston = base.load_boston() features = math_ops.cast( array_ops.reshape(constant_op.constant(boston.data), [-1, 13]), dtypes.float32) labels = math_ops.cast( array_ops.reshape(constant_op.constant(boston.target), [-1, 1]), dtypes.float32) return features, labels class FeatureColumnTest(test.TestCase): def testTrain(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( boston_input_fn) est = linear.LinearRegressor(feature_columns=feature_columns) est.fit(input_fn=boston_input_fn, steps=1) _ = est.evaluate(input_fn=boston_input_fn, steps=1) if __name__ == '__main__': test.main()
apache-2.0
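The loss values asserted in testLoss and testLossWithWeights above follow from the in-code comment that the regressor should learn the constant prediction y = 0.25 on the all-ones feature; the quoted 0.1875 and 0.4125 can be checked with a few lines of NumPy.

# Quick arithmetic check of the expected losses in testLoss / testLossWithWeights.
import numpy as np

labels = np.array([1., 0., 0., 0.])
pred = 0.25                                   # the constant the model should learn
sq_err = (labels - pred) ** 2

print(sq_err.mean())                              # 0.1875, as in testLoss
weights = np.array([7., 1., 1., 1.])
print(np.sum(weights * sq_err) / weights.sum())   # 0.4125, as in testLossWithWeights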
gkulkarni/JetMorphology
jet3d.py
1
1716
""" File: jet3d.py Creates mock jet data for fitjet_3d.py. """ import matplotlib as mpl import numpy as np import matplotlib.pyplot as plt from matplotlib import cm from scipy.ndimage.filters import gaussian_filter as gf a = 0.1 b = 10.0 r = np.linspace(0.0,1.0,1000) def z(u): return (a/(2.0*np.pi)) * u * (u/(2.0*np.pi))**2 u = np.linspace(0.0, 20.0*np.pi, 1000) zv = z(u) def x(u): return (u**-0.2) * (b/(2.0*np.pi)) * u * np.cos(u) def y(u): return (u**-0.2) * (b/(2.0*np.pi)) * u * np.sin(u) xv = x(u) yv = y(u) def ri(i): return np.matrix([[np.cos(i), 0.0, np.sin(i)],[0.0, 1.0, 0.0],[-np.sin(i), 0.0, np.cos(i)]]) def rl(l): return np.matrix([[np.cos(l), -np.sin(l), 0.0],[np.sin(l), np.cos(l), 0.0],[0.0, 0.0, 1.0]]) i = 2.0 l = 3.0 zvarr = zv*0.5 iarr = zvarr/zvarr.max() iarr *= np.pi/2.0 c = np.dstack((xv, yv, zv)) c = np.squeeze(c) d = np.zeros((1000,3)) lm = rl(l) for n in range(1000): d[n] = c[n]*ri(iarr[n])*lm xv = d[1:,0] yv = d[1:,1] plt.plot(xv, yv) plt.show() #sys.exit() nc = 100 a = np.zeros((nc,nc),dtype=np.float32) zl = xv.min() - 5.0 zu = xv.max() + 5.0 yl = yv.min() - 5.0 yu = yv.max() + 5.0 lz = zu - zl ly = yu - yl print lz, ly dz = lz/nc dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2). def zloc(cood): return int((cood-zl)/dz) + 1 def yloc(cood): return int((cood-yl)/dy) + 1 for i in xrange(xv.size): zpos = zloc(xv[i]) ypos = yloc(yv[i]) a[ypos, zpos] += 1.0 a2 = gf(a, 1.0) save_data = False if save_data: a2.tofile('mockdata_3d_nc100.dat') # Save for fitjet_3d.py plt.imshow(a2, cmap=cm.Blues) plt.show()
mit
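The projection step in jet3d.py composes ri(i), a rotation about the y-axis, with rl(l), a rotation about the z-axis, before flattening the helix onto the image plane. A standalone sanity sketch (plain ndarrays instead of np.matrix; not part of the stored file) confirming that the composition is a proper rotation:

# Check that ri(i) @ rl(l) is orthogonal with unit determinant.
import numpy as np

def ri(i):
    return np.array([[np.cos(i), 0.0, np.sin(i)],
                     [0.0, 1.0, 0.0],
                     [-np.sin(i), 0.0, np.cos(i)]])

def rl(l):
    return np.array([[np.cos(l), -np.sin(l), 0.0],
                     [np.sin(l), np.cos(l), 0.0],
                     [0.0, 0.0, 1.0]])

R = ri(2.0) @ rl(3.0)
print(np.allclose(R.T @ R, np.eye(3)), np.isclose(np.linalg.det(R), 1.0))  # True True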
yuanming-hu/taichi
examples/euler.py
1
15196
import matplotlib.cm as cm import taichi as ti # A compressible euler equation solver using two methods # 1: 2nd order muscl # 2: thinc BVD, ref: "Limiter-free discontinuity-capturing scheme # for compressible gas dynamics with reactive fronts" real = ti.f32 ti.init(arch=ti.gpu, default_fp=real) N = 1024 # grid resolution CFL = .9 # keep below 1 method = 1 # 0:muscl, 1:thinc IC_type = 0 # 0:sod BC_type = 0 # 0:walls img_field = 0 # 0:density, 1: schlieren, 2:vorticity, 3: velocity mag res = 1024 # gui resolution cmap_name = 'magma_r' # python colormap use_fixed_caxis = 0 # 1: use fixed caxis limits, 0: automatic caxis limits fixed_caxis = [0.0, 5.0] # fixed caxis limits Q = ti.Vector.field(4, dtype=real, shape=(N, N)) # [rho, rho*u, rho*v, rho*e] consv vars Q_old = ti.Vector.field(4, dtype=real, shape=(N, N)) W = ti.Vector.field(4, dtype=real, shape=(N, N)) # [rho, u, v, p] cell avg W_xl = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # left side of x-face W_xr = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # right side of x-face W_yl = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # left side of y-face W_yr = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # right side of y-face F_x = ti.Vector.field(4, dtype=real, shape=(N, N)) # x-face flux F_y = ti.Vector.field(4, dtype=real, shape=(N, N)) # y-face flux dt = ti.field(dtype=real, shape=()) img = ti.field(dtype=ti.f32, shape=(res, res)) beta_smooth = 1.2 beta_sharp = 2.0 gamma = 1.4 # ratio of specific heats h = 1.0 / (N - 2) # cell size vol = h * h # cell volume @ti.func def is_interior_cell(i, j): return 0 < i < N - 1 and 0 < j < N - 1 @ti.func def is_interior_x_face(i, j): return 1 < i < N - 1 and 0 < j < N - 1 @ti.func def is_boundary_x_face(i, j): return (i == 1 or i == N - 1) and 0 < j < N - 1 @ti.func def is_interior_y_face(i, j): return 0 < i < N - 1 and 1 < j < N - 1 @ti.func def is_boundary_y_face(i, j): return 0 < i < N - 1 and (j == 1 or j == N - 1) @ti.func def get_cell_pos(i, j): return ti.Vector([i * h - h / 2.0, j * h - h / 2.0]) @ti.kernel def compute_W(): # conversion from conservative variables to primitive variables for i, j in Q: W[i, j] = q_to_w(Q[i, j]) @ti.kernel def copy_to_old(): for i, j in Q: Q_old[i, j] = Q[i, j] @ti.kernel def set_ic(): for i, j in Q: if IC_type == 0: # primitive variable initial conditions w_in = ti.Vector([10.0, 0.0, 0.0, 10.0]) w_out = ti.Vector([.125, 0.0, 0.0, .1]) pos = get_cell_pos(i, j) center = ti.Vector([.5, .5]) if (pos - center).norm() < .25: Q[i, j] = w_to_q(w_in) else: Q[i, j] = w_to_q(w_out) # implement more ic's later @ti.kernel def set_bc(): # enforce boundary conditions by setting ghost cells for i, j in Q: if not is_interior_cell(i, j): if BC_type == 0: # walls # enforce neumann=0 and zero normal velocity on face if i == 0: Q[i, j] = Q[i + 1, j] Q[i, j][1] = -Q[i + 1, j][1] if i == N - 1: Q[i, j] = Q[i - 1, j] # neumann 0 bc Q[i, j][1] = -Q[i - 1, j][1] # enforce 0 normal velocty at face if j == 0: Q[i, j] = Q[i, j + 1] Q[i, j][2] = -Q[i, j + 1][2] if j == N - 1: Q[i, j] = Q[i, j - 1] Q[i, j][2] = -Q[i, j - 1][2] # implement more bc's later @ti.func def mc_lim(r): # MC flux limiter return max(0.0, min(2.0 * r, min(.5 * (r + 1.0), 2.0))) @ti.func def w_to_q(w): # convert primitive variables to conserved variables q = ti.Vector([0.0, 0.0, 0.0, 0.0]) q[0] = w[0] # rho q[1] = w[0] * w[1] # rho*u q[2] = w[0] * w[2] # rho*v q[3] = w[0] * (w[3] / ((gamma - 1) * w[0]) + 0.5 * (w[1]**2 + w[2]**2)) # rho*e return q @ti.func def q_to_w(q): # convert conserved variables to 
primitive variables w = ti.Vector([0.0, 0.0, 0.0, 0.0]) w[0] = q[0] # rho w[1] = q[1] / q[0] # u w[2] = q[2] / q[0] # v w[3] = (gamma - 1) * (q[3] - 0.5 * (q[1]**2 + q[2]**2) / q[0]) # p return w @ti.func def HLLC_flux(qL, qR, n): # normal vector nx = n[0] ny = n[1] # Left state rL = qL[0] # rho uL = qL[1] / qL[0] # u vL = qL[2] / qL[0] # v pL = (gamma - 1.0) * (qL[3] - 0.5 * (qL[1]**2 + qL[2]**2) / qL[0]) #p vnL = uL * nx + vL * ny vtL = -uL * ny + vL * nx aL = ti.sqrt(gamma * pL / rL) HL = (qL[3] + pL) / rL # Right state rR = qR[0] # rho uR = qR[1] / qR[0] # u vR = qR[2] / qR[0] # v pR = (gamma - 1.0) * (qR[3] - 0.5 * (qR[1]**2 + qR[2]**2) / qR[0]) #p vnR = uR * nx + vR * ny vtR = -uR * ny + vR * nx aR = ti.sqrt(gamma * pR / rR) HR = (qR[3] + pR) / rR # Left and Right fluxes fL = ti.Vector([ rL * vnL, rL * vnL * uL + pL * nx, rL * vnL * vL + pL * ny, rL * vnL * HL ]) fR = ti.Vector([ rR * vnR, rR * vnR * uR + pR * nx, rR * vnR * vR + pR * ny, rR * vnR * HR ]) # Roe Averages rt = ti.sqrt(rR / rL) u = (uL + rt * uR) / (1.0 + rt) v = (vL + rt * vR) / (1.0 + rt) H = (HL + rt * HR) / (1.0 + rt) a = ti.sqrt((gamma - 1.0) * (H - (u**2 + v**2) / 2.0)) vn = u * nx + v * ny # wavespeeds sL = min(vnL - aL, vn - a) sR = max(vnR + aR, vn + a) sM = (pL - pR + rR * vnR * (sR - vnR) - rL * vnL * (sL - vnL)) / (rR * (sR - vnR) - rL * (sL - vnL)) # HLLC flux. HLLC = ti.Vector([0.0, 0.0, 0.0, 0.0]) if (0 <= sL): HLLC = fL elif (sL <= 0) and (0 <= sM): qsL = rL * (sL-vnL)/(sL-sM) \ * ti.Vector([1.0, sM*nx-vtL*ny,sM*ny+vtL*nx, \ qL[3]/rL + (sM-vnL)*(sM+pL/(rL*(sL-vnL)))]) HLLC = fL + sL * (qsL - qL) elif (sM <= 0) and (0 <= sR): qsR = rR * (sR-vnR)/(sR-sM) \ * ti.Vector([1.0, sM*nx-vtR*ny,sM*ny+vtR*nx, \ qR[3]/rR + (sM-vnR)*(sM+pR/(rR*(sR-vnR)))]) HLLC = fR + sR * (qsR - qR) elif (0 >= sR): HLLC = fR return HLLC @ti.kernel def compute_F_muscl(): for i, j in Q: if is_interior_x_face(i, j): # muscl reconstrucion of left and right states with HLLC flux wL = ti.Vector([0.0, 0.0, 0.0, 0.0]) wR = ti.Vector([0.0, 0.0, 0.0, 0.0]) for f in ti.static(range(4)): ratio_l = (W[i, j][f] - W[i - 1, j][f]) / (W[i - 1, j][f] - W[i - 2, j][f]) ratio_r = (W[i, j][f] - W[i - 1, j][f]) / (W[i + 1, j][f] - W[i, j][f]) wL[f] = W[i - 1, j][f] + 0.5 * mc_lim(ratio_l) * ( W[i - 1, j][f] - W[i - 2, j][f]) wR[f] = W[i, j][f] - 0.5 * mc_lim(ratio_r) * (W[i + 1, j][f] - W[i, j][f]) F_x[i, j] = HLLC_flux(w_to_q(wL), w_to_q(wR), ti.Vector([1.0, 0.0])) elif is_boundary_x_face(i, j): F_x[i, j] = HLLC_flux(Q[i - 1, j], Q[i, j], ti.Vector([1.0, 0.0])) if is_interior_y_face(i, j): # muscl reconstrucion of left and right states with HLLC flux wL = ti.Vector([0.0, 0.0, 0.0, 0.0]) wR = ti.Vector([0.0, 0.0, 0.0, 0.0]) for f in ti.static(range(4)): ratio_l = (W[i, j][f] - W[i, j - 1][f]) / (W[i, j - 1][f] - W[i, j - 2][f]) ratio_r = (W[i, j][f] - W[i, j - 1][f]) / (W[i, j + 1][f] - W[i, j][f]) wL[f] = W[i, j - 1][f] + 0.5 * mc_lim(ratio_l) * ( W[i, j - 1][f] - W[i, j - 2][f]) wR[f] = W[i, j][f] - 0.5 * mc_lim(ratio_r) * (W[i, j + 1][f] - W[i, j][f]) F_y[i, j] = HLLC_flux(w_to_q(wL), w_to_q(wR), ti.Vector([0.0, 1.0])) elif is_boundary_y_face(i, j): F_y[i, j] = HLLC_flux(Q[i, j - 1], Q[i, j], ti.Vector([0.0, 1.0])) @ti.func def sign(a): sgn = 0.0 if a > 0.0: sgn = 1.0 elif a < 0.0: sgn = -1.0 return sgn ti.func def cosh(a): return (ti.exp(a) + ti.exp(-a)) / 2.0 @ti.func def thinc(wl, wc, wr, beta): w0 = wc w1 = wc if (wr - wc) * (wc - wl) > 0.0: # use thinc reconstruction eps = 1.0e-15 wmin = min(wr, wl) wmax = max(wr, wl) wdelta = wmax - wmin theta = 
sign(wr - wl) C = (wc - wmin + eps) / (wdelta + eps) B = ti.exp(theta * beta * (2 * C - 1)) A = (B / cosh(beta) - 1) / ti.tanh(beta) # reconstructed value on right side of left face w0 = wmin + wdelta / 2.0 * (1.0 + theta * A) # reconstructed value on left side of right face w1 = wmin + wdelta / 2.0 * (1.0 + theta * (ti.tanh(beta) + A) / (1.0 + A * ti.tanh(beta))) return w0, w1 @ti.kernel def compute_F_thinc(): # reconstruct primitve variables on interior faces of each cell using # multiple candidate thinc reconstructions for i, j in Q: if is_interior_cell(i, j): for f in ti.static(range(4)): # smooth x-dir reconstruction w0, w1 = thinc(W[i - 1, j][f], W[i, j][f], W[i + 1, j][f], beta_smooth) W_xr[i, j, 0][f] = w0 W_xl[i + 1, j, 0][f] = w1 # sharp x-dir reconstruction w0, w1 = thinc(W[i - 1, j][f], W[i, j][f], W[i + 1, j][f], beta_sharp) W_xr[i, j, 1][f] = w0 W_xl[i + 1, j, 1][f] = w1 # smooth y-dir reconstruction w0, w1 = thinc(W[i, j - 1][f], W[i, j][f], W[i, j + 1][f], beta_smooth) W_yr[i, j, 0][f] = w0 W_yl[i, j + 1, 0][f] = w1 # sharp y-dir reconstruction w0, w1 = thinc(W[i, j - 1][f], W[i, j][f], W[i, j + 1][f], beta_sharp) W_yr[i, j, 1][f] = w0 W_yl[i, j + 1, 1][f] = w1 for i, j in Q: # choose the final reconstruction for each cell using the BVD algorithm if is_interior_cell(i, j): for f in ti.static(range(4)): # x-dir TBV_smooth = abs(W_xl[i,j,0][f] - W_xr[i,j,0][f]) \ + abs(W_xl[i+1,j,0][f] - W_xr[i+1,j,0][f]) TBV_sharp = abs(W_xl[i,j,1][f] - W_xr[i,j,1][f]) \ + abs(W_xl[i+1,j,1][f] - W_xr[i+1,j,1][f]) if TBV_smooth < TBV_sharp: W_xr[i, j, 2][f] = W_xr[i, j, 0][f] W_xl[i + 1, j, 2][f] = W_xl[i + 1, j, 0][f] else: W_xr[i, j, 2][f] = W_xr[i, j, 1][f] W_xl[i + 1, j, 2][f] = W_xl[i + 1, j, 1][f] # y-dir TBV_smooth = abs(W_yl[i,j,0][f] - W_yr[i,j,0][f]) \ + abs(W_yl[i,j+1,0][f] - W_yr[i,j+1,0][f]) TBV_sharp = abs(W_yl[i,j,1][f] - W_yr[i,j,1][f]) \ + abs(W_yl[i,j+1,1][f] - W_yr[i,j+1,1][f]) if TBV_smooth < TBV_sharp: W_yr[i, j, 2][f] = W_yr[i, j, 0][f] W_yl[i, j + 1, 2][f] = W_yl[i, j + 1, 0][f] else: W_yr[i, j, 2][f] = W_yr[i, j, 1][f] W_yl[i, j + 1, 2][f] = W_yl[i, j + 1, 1][f] for i, j in Q: # compute numerical fluxes of with Riemann solver if is_interior_x_face(i, j): # muscl reconstrucion of left and right states with HLLC flux F_x[i, j] = HLLC_flux(w_to_q(W_xl[i, j, 2]), w_to_q(W_xr[i, j, 2]), ti.Vector([1.0, 0.0])) elif is_boundary_x_face(i, j): F_x[i, j] = HLLC_flux(Q[i - 1, j], Q[i, j], ti.Vector([1.0, 0.0])) if is_interior_y_face(i, j): F_y[i, j] = HLLC_flux(w_to_q(W_yl[i, j, 2]), w_to_q(W_yr[i, j, 2]), ti.Vector([0.0, 1.0])) elif is_boundary_y_face(i, j): F_y[i, j] = HLLC_flux(Q[i, j - 1], Q[i, j], ti.Vector([0.0, 1.0])) @ti.kernel def calc_dt(): dt[None] = 1.0e5 for i, j in Q: w = q_to_w(Q[i, j]) a = ti.sqrt(gamma * w[3] / w[0]) vel = ti.sqrt(w[1]**2 + w[2]**2) ws = a + vel ti.atomic_min(dt[None], CFL * h / ws / 2.0) @ti.kernel def update_Q(rk_step: ti.template()): for i, j in Q: if is_interior_cell(i, j): if ti.static(rk_step == 0): Q[i, j] = Q[i, j] + dt[None] * (F_x[i, j] - F_x[i + 1, j] + F_y[i, j] - F_y[i, j + 1]) / h if ti.static(rk_step == 1): Q[i, j] = (Q[i, j] + Q_old[i, j]) / 2.0 + dt[None] * ( F_x[i, j] - F_x[i + 1, j] + F_y[i, j] - F_y[i, j + 1]) / h @ti.kernel def paint(): for i, j in img: ii = min(max(1, i * N // res), N - 2) jj = min(max(1, j * N // res), N - 2) if img_field == 0: # density img[i, j] = Q[ii, jj][0] elif img_field == 1: # numerical schlieren img[i, j] = ti.sqrt(((Q[ii + 1, jj][0] - Q[ii - 1, jj][0]) / h)**2 + ((Q[ii, jj + 1][0] - Q[ii, jj - 
1][0]) / h)**2) elif img_field == 2: # vorticity img[i, j] = (Q[ii + 1, jj][2] - Q[ii - 1, jj][2]) / h - ( Q[ii, jj + 1][1] - Q[ii, jj - 1][1]) / h elif img_field == 3: # velocity magnitude img[i, j] = ti.sqrt(Q[ii, jj][1]**2 + Q[ii, jj][2]**2) max = -1.0e10 min = 1.0e10 for i, j in img: ti.atomic_max(max, img[i, j]) ti.atomic_min(min, img[i, j]) for i, j in img: if use_fixed_caxis: min = fixed_caxis[0] max = fixed_caxis[1] img[i, j] = (img[i, j] - min) / (max - min) gui = ti.GUI('Euler Equations', (res, res)) cmap = cm.get_cmap(cmap_name) set_ic() set_bc() n = 0 while gui.running: calc_dt() copy_to_old() for rk_step in range(2): compute_W() if method == 0: compute_F_muscl() else: compute_F_thinc() update_Q(rk_step) set_bc() if n % 10 == 0: paint() gui.set_image(cmap(img.to_numpy())) gui.show() n += 1
mit
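The solver above chooses, cell by cell, between a smooth and a sharp THINC reconstruction using the BVD (boundary variation diminishing) criterion. As a plain-NumPy illustration of the THINC formula implemented in the `thinc()` function of that file (outside Taichi, with hypothetical sample values), a minimal sketch:

import numpy as np

def thinc_faces(wl, wc, wr, beta, eps=1.0e-15):
    """THINC reconstruction of cell value wc given neighbours wl and wr.

    Returns (w0, w1): the reconstructed values on the right side of the
    left face and on the left side of the right face. Falls back to the
    cell average when the local data is not monotone.
    """
    if (wr - wc) * (wc - wl) <= 0.0:          # local extremum: keep cell average
        return wc, wc
    wmin, wmax = min(wl, wr), max(wl, wr)
    wdelta = wmax - wmin
    theta = np.sign(wr - wl)
    C = (wc - wmin + eps) / (wdelta + eps)
    B = np.exp(theta * beta * (2.0 * C - 1.0))
    A = (B / np.cosh(beta) - 1.0) / np.tanh(beta)
    w0 = wmin + 0.5 * wdelta * (1.0 + theta * A)
    w1 = wmin + 0.5 * wdelta * (1.0 + theta * (np.tanh(beta) + A)
                                / (1.0 + A * np.tanh(beta)))
    return w0, w1

# smooth (beta ~ 1.2) versus sharp (beta ~ 2.0) reconstruction of a jump
print(thinc_faces(0.125, 0.5, 1.0, beta=1.2))
print(thinc_faces(0.125, 0.5, 1.0, beta=2.0))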
gengliangwang/spark
python/pyspark/pandas/tests/indexes/test_category.py
15
4626
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import pandas as pd from pandas.api.types import CategoricalDtype import pyspark.pandas as ps from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils class CategoricalIndexTest(PandasOnSparkTestCase, TestUtils): def test_categorical_index(self): pidx = pd.CategoricalIndex([1, 2, 3]) psidx = ps.CategoricalIndex([1, 2, 3]) self.assert_eq(psidx, pidx) self.assert_eq(psidx.categories, pidx.categories) self.assert_eq(psidx.codes, pd.Index(pidx.codes)) self.assert_eq(psidx.ordered, pidx.ordered) pidx = pd.Index([1, 2, 3], dtype="category") psidx = ps.Index([1, 2, 3], dtype="category") self.assert_eq(psidx, pidx) self.assert_eq(psidx.categories, pidx.categories) self.assert_eq(psidx.codes, pd.Index(pidx.codes)) self.assert_eq(psidx.ordered, pidx.ordered) pdf = pd.DataFrame( { "a": pd.Categorical([1, 2, 3, 1, 2, 3]), "b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]), }, index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True), ) psdf = ps.from_pandas(pdf) pidx = pdf.set_index("b").index psidx = psdf.set_index("b").index self.assert_eq(psidx, pidx) self.assert_eq(psidx.categories, pidx.categories) self.assert_eq(psidx.codes, pd.Index(pidx.codes)) self.assert_eq(psidx.ordered, pidx.ordered) pidx = pdf.set_index(["a", "b"]).index.get_level_values(0) psidx = psdf.set_index(["a", "b"]).index.get_level_values(0) self.assert_eq(psidx, pidx) self.assert_eq(psidx.categories, pidx.categories) self.assert_eq(psidx.codes, pd.Index(pidx.codes)) self.assert_eq(psidx.ordered, pidx.ordered) def test_astype(self): pidx = pd.Index(["a", "b", "c"]) psidx = ps.from_pandas(pidx) self.assert_eq(psidx.astype("category"), pidx.astype("category")) self.assert_eq( psidx.astype(CategoricalDtype(["c", "a", "b"])), pidx.astype(CategoricalDtype(["c", "a", "b"])), ) pcidx = pidx.astype(CategoricalDtype(["c", "a", "b"])) kcidx = psidx.astype(CategoricalDtype(["c", "a", "b"])) self.assert_eq(kcidx.astype("category"), pcidx.astype("category")) if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq( kcidx.astype(CategoricalDtype(["b", "c", "a"])), pcidx.astype(CategoricalDtype(["b", "c", "a"])), ) else: self.assert_eq( kcidx.astype(CategoricalDtype(["b", "c", "a"])), pidx.astype(CategoricalDtype(["b", "c", "a"])), ) self.assert_eq(kcidx.astype(str), pcidx.astype(str)) def test_factorize(self): pidx = pd.CategoricalIndex([1, 2, 3, None]) psidx = ps.from_pandas(pidx) pcodes, puniques = pidx.factorize() kcodes, kuniques = psidx.factorize() self.assert_eq(kcodes.tolist(), pcodes.tolist()) self.assert_eq(kuniques, puniques) pcodes, puniques = pidx.factorize(na_sentinel=-2) kcodes, kuniques = psidx.factorize(na_sentinel=-2) 
self.assert_eq(kcodes.tolist(), pcodes.tolist()) self.assert_eq(kuniques, puniques) if __name__ == "__main__": import unittest from pyspark.pandas.tests.indexes.test_category import * # noqa: F401 try: import xmlrunner # type: ignore[import] testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
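The pandas-on-Spark test above checks that `ps.CategoricalIndex` mirrors plain pandas. A small pandas-only sketch of the operations it exercises (`categories`, `codes`, `astype` with a reordered `CategoricalDtype`, and `factorize`), assuming a reasonably recent pandas:

import pandas as pd
from pandas.api.types import CategoricalDtype

idx = pd.CategoricalIndex(["a", "b", "c", None])
print(idx.categories)         # Index(['a', 'b', 'c'], dtype='object')
print(list(idx.codes))        # [0, 1, 2, -1]; -1 marks the missing value

# Recasting to a different category order changes the codes, not the values
reordered = idx.astype(CategoricalDtype(["c", "a", "b"]))
print(list(reordered.codes))  # [1, 2, 0, -1]

# factorize() returns integer codes plus the unique values
codes, uniques = idx.factorize()
print(list(codes), list(uniques))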
ChanChiChoi/scikit-learn
examples/cluster/plot_dict_face_patches.py
337
2747
""" Online learning of a dictionary of parts of faces ================================================== This example uses a large dataset of faces to learn a set of 20 x 20 images patches that constitute faces. From the programming standpoint, it is interesting because it shows how to use the online API of the scikit-learn to process a very large dataset by chunks. The way we proceed is that we load an image at a time and extract randomly 50 patches from this image. Once we have accumulated 500 of these patches (using 10 images), we run the `partial_fit` method of the online KMeans object, MiniBatchKMeans. The verbose setting on the MiniBatchKMeans enables us to see that some clusters are reassigned during the successive calls to partial-fit. This is because the number of patches that they represent has become too low, and it is better to choose a random new cluster. """ print(__doc__) import time import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.image import extract_patches_2d faces = datasets.fetch_olivetti_faces() ############################################################################### # Learn the dictionary of images print('Learning the dictionary... ') rng = np.random.RandomState(0) kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True) patch_size = (20, 20) buffer = [] index = 1 t0 = time.time() # The online learning part: cycle over the whole dataset 6 times index = 0 for _ in range(6): for img in faces.images: data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng) data = np.reshape(data, (len(data), -1)) buffer.append(data) index += 1 if index % 10 == 0: data = np.concatenate(buffer, axis=0) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) kmeans.partial_fit(data) buffer = [] if index % 100 == 0: print('Partial fit of %4i out of %i' % (index, 6 * len(faces.images))) dt = time.time() - t0 print('done in %.2fs.' % dt) ############################################################################### # Plot the results plt.figure(figsize=(4.2, 4)) for i, patch in enumerate(kmeans.cluster_centers_): plt.subplot(9, 9, i + 1) plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation='nearest') plt.xticks(()) plt.yticks(()) plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' % (dt, 8 * len(faces.images)), fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
bsd-3-clause
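The pattern worth isolating from the example above is incremental clustering with `partial_fit`. A minimal sketch of the same chunked loop on synthetic data (random vectors standing in for flattened 20 x 20 face patches):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=16, random_state=0)

# Stream the data in chunks instead of fitting on everything at once
for _ in range(20):
    chunk = rng.rand(500, 400)       # stand-in for 500 flattened 20x20 patches
    chunk -= chunk.mean(axis=0)      # per-chunk centering, as in the example
    chunk /= chunk.std(axis=0)
    kmeans.partial_fit(chunk)

print(kmeans.cluster_centers_.shape)  # (16, 400)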
RobertABT/heightmap
build/matplotlib/examples/pylab_examples/usetex_demo.py
12
2812
import matplotlib
matplotlib.rc('text', usetex = True)
import pylab
import numpy as np

## interface tracking profiles
N = 500
delta = 0.6
X = -1 + 2. * np.arange(N) / (N - 1)
pylab.plot(X, (1 - np.tanh(4. * X / delta)) / 2,    ## phase field tanh profiles
           X, (X + 1) / 2,                          ## level set distance function
           X, (1.4 + np.tanh(4. * X / delta)) / 4,  ## composition profile
           X, X < 0, 'k--',                         ## sharp interface
           linewidth = 5)

## legend
pylab.legend((r'phase field', r'level set', r'composition', r'sharp interface'),
             shadow = True, loc = (0.01, 0.55))
ltext = pylab.gca().get_legend().get_texts()
pylab.setp(ltext[0], fontsize = 20, color = 'b')
pylab.setp(ltext[1], fontsize = 20, color = 'g')
pylab.setp(ltext[2], fontsize = 20, color = 'r')
pylab.setp(ltext[3], fontsize = 20, color = 'k')

## the arrow
height = 0.1
offset = 0.02
pylab.plot((-delta / 2., delta / 2), (height, height), 'k', linewidth = 2)
pylab.plot((-delta / 2, -delta / 2 + offset * 2), (height, height - offset), 'k', linewidth = 2)
pylab.plot((-delta / 2, -delta / 2 + offset * 2), (height, height + offset), 'k', linewidth = 2)
pylab.plot((delta / 2, delta / 2 - offset * 2), (height, height - offset), 'k', linewidth = 2)
pylab.plot((delta / 2, delta / 2 - offset * 2), (height, height + offset), 'k', linewidth = 2)
pylab.text(-0.06, height - 0.06, r'$\delta$', {'color' : 'k', 'fontsize' : 24})

## X-axis label
pylab.xticks((-1, 0, 1), ('-1', '0', '1'), color = 'k', size = 20)

## Left Y-axis labels
pylab.ylabel(r'\bf{phase field} $\phi$', {'color' : 'b', 'fontsize' : 20 })
pylab.yticks((0, 0.5, 1), ('0', '.5', '1'), color = 'k', size = 20)

## Right Y-axis labels
pylab.text(1.05, 0.5, r"\bf{level set} $\phi$", {'color' : 'g', 'fontsize' : 20},
           horizontalalignment = 'left', verticalalignment = 'center',
           rotation = 90, clip_on = False)
pylab.text(1.01, -0.02, "-1", {'color' : 'k', 'fontsize' : 20})
pylab.text(1.01, 0.98, "1", {'color' : 'k', 'fontsize' : 20})
pylab.text(1.01, 0.48, "0", {'color' : 'k', 'fontsize' : 20})

## level set equations
pylab.text(0.1, 0.85,
           r'$|\nabla\phi| = 1,$ \newline $ \frac{\partial \phi}{\partial t} + U|\nabla \phi| = 0$',
           {'color' : 'g', 'fontsize' : 20})

## phase field equations
pylab.text(0.2, 0.15,
           r'$\mathcal{F} = \int f\left( \phi, c \right) dV,$ \newline $ \frac{ \partial \phi } { \partial t } = -M_{ \phi } \frac{ \delta \mathcal{F} } { \delta \phi }$',
           {'color' : 'b', 'fontsize' : 20})

## these went wrong in pdf in a previous version
pylab.text(-.9,.42,r'gamma: $\gamma$', {'color': 'r', 'fontsize': 20})
pylab.text(-.9,.36,r'Omega: $\Omega$', {'color': 'b', 'fontsize': 20})

pylab.show()
mit
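The demo above leans on matplotlib's LaTeX text rendering. A much smaller sketch of the same `usetex` setup (it assumes a working LaTeX installation on the system):

import matplotlib
matplotlib.rc('text', usetex=True)   # hand all text off to an external LaTeX
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(-1, 1, 200)
plt.plot(x, np.tanh(4 * x))
plt.xlabel(r'$x$')
plt.ylabel(r'$\tanh(4x)$')
plt.title(r'minimal \LaTeX{} labels via usetex')
plt.show()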
ThomasMiconi/nupic.research
htmresearch/frameworks/layers/physical_object_base.py
10
4309
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2016, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- from abc import ABCMeta, abstractmethod try: from mpl_toolkits.mplot3d import Axes3D except ImportError: print "Your Matplotlib version is not up to date. " \ "Don't use plotting functions" import matplotlib.pyplot as plt class PhysicalObject(object): """ Base class to create physical objects, for L4-L2 inference experiments. Physical objects have a set of features, constant over ranges of locations, so that locations can be sampled from these features. As far as physical objects are concerned, features as encoded as integers. The ObjectMachine will take care of mapping them to SDR's. All objects should implement the abstract methods defined below. The "epsilon" parameter specifies the resolution of methods matching locations to features and checking if a location is on the object. It serves two purposes: - it avoids having a null probability of sampling an edge - it compensates the fact that locations are converted to integers before being passed to the encoder in the ObjectMachine Note that because locations are integers, rather large object sizes should be used. """ __metaclass__ = ABCMeta # typical feature indices EMPTY_FEATURE = -1 FLAT = 0 EDGE = 1 SPHERICAL_SURFACE = 2 CYLINDER_SURFACE = 3 CYLINDER_EDGE = 4 POINTY = 5 SURFACE = 6 # default resolution to use for matching locations DEFAULT_EPSILON = 2 # each physical objects has a list of features to sample from _FEATURES = [] @abstractmethod def getFeatureID(self, location): """ Returns the feature index associated with the provided location. If the location is not valid (i.e. not on the object's surface), -1 is returned, which will yield an empty sensory input. """ @abstractmethod def contains(self, location): """ Checks that the object contains the provided location, i.e. that it is on the object's surface (at epsilon's precision). """ @abstractmethod def sampleLocation(self): """ Sample a location from the object. The locations should be sampled uniformly whenever is possible. """ @abstractmethod def sampleLocationFromFeature(self, feature): """ Samples a location from the provided specific feature. """ def almostEqual(self, number, other): """ Checks that the two provided number are equal with a precision of epsilon. Epsilon should be specified at construction, otherwise a default value will be used. """ return abs(number - other) <= self.epsilon def getFeatures(self): """ Returns the list of object feature spans, from which the user can sample locations. """ return self._FEATURES def plot(self, numPoints=100): """ Plots the object in a 3D scatter. 
This method should be overriden when possible. This default behavior simply samples numPoints points from the object and plots them in a 3d scatter. """ fig = plt.figure() ax = fig.add_subplot(111, projection='3d') for feature in self._FEATURES: for _ in xrange(numPoints): x, y, z = tuple(self.sampleLocationFromFeature(feature)) ax.scatter(x, y, z, marker=".") ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.title("{}".format(self)) return fig, ax
agpl-3.0
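To make the abstract interface above concrete, here is a hypothetical minimal subclass (not part of the original module): a sphere centered at the origin with a single SPHERICAL_SURFACE feature, sketched under the assumption that locations are 3-element numpy arrays and that `PhysicalObject` from the module above is importable:

import numpy as np

from htmresearch.frameworks.layers.physical_object_base import PhysicalObject

class Sphere(PhysicalObject):
    """Hypothetical minimal physical object: a sphere centered at the origin."""

    _FEATURES = [PhysicalObject.SPHERICAL_SURFACE]

    def __init__(self, radius=50, epsilon=None):
        self.radius = radius
        self.epsilon = epsilon if epsilon is not None else self.DEFAULT_EPSILON

    def getFeatureID(self, location):
        # empty sensory input for locations that are not on the surface
        if not self.contains(location):
            return self.EMPTY_FEATURE
        return self.SPHERICAL_SURFACE

    def contains(self, location):
        return self.almostEqual(np.linalg.norm(location), self.radius)

    def sampleLocation(self):
        # uniform direction on the sphere, scaled to the radius
        direction = np.random.randn(3)
        direction /= np.linalg.norm(direction)
        return direction * self.radius

    def sampleLocationFromFeature(self, feature):
        assert feature == self.SPHERICAL_SURFACE
        return self.sampleLocation()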
jiajunshen/partsNet
scripts/popModuleShift.py
1
13762
from __future__ import division, print_function,absolute_import import pylab as plt import amitgroup.plot as gr import numpy as np import amitgroup as ag import os import pnet import matplotlib.pylab as plot from pnet.cyfuncs import index_map_pooling from queue import Queue def extract(ims,allLayers): #print(allLayers) curX = ims for layer in allLayers: #print('-------------') #print(layer) curX = layer.extract(curX) #print(np.array(curX).shape) #print('------------------') return curX def partsPool(originalPartsRegion, numParts): partsGrid = np.zeros((1,1,numParts)) for i in range(originalPartsRegion.shape[0]): for j in range(originalPartsRegion.shape[1]): if(originalPartsRegion[i,j]!=-1): partsGrid[0,0,originalPartsRegion[i,j]] = 1 return partsGrid def test(ims,labels,net): yhat = net.classify((ims,1000)) return yhat == labels #def trainPOP(): if pnet.parallel.main(__name__): #X = np.load("testMay151.npy") #X = np.load("_3_100*6*6_1000*1*1_Jun_16_danny.npy") #X = np.load("original6*6.npy") #X = np.load("sequential6*6.npy") X = np.load("testJul16.npy") model = X.item() # get num of Parts numParts = model['layers'][1]['num_parts'] print(numParts,model) net = pnet.PartsNet.load_from_dict(model) allLayer = net.layers ims,labels = ag.io.load_mnist('training') trainingDataNum = 1000 firstLayerShape = 8 extractedFeature = extract(ims[0:trainingDataNum],allLayer[0:2])[0] print(extractedFeature.shape) extractedFeature = extractedFeature.reshape(extractedFeature.shape[0:3]) partsPlot = np.zeros((numParts,firstLayerShape,firstLayerShape)) partsCodedNumber = np.zeros(numParts) imgRegion= [[] for x in range(numParts)] partsRegion = [[] for x in range(numParts)] for i in range(trainingDataNum): codeParts = extractedFeature[i] for m in range(29 - firstLayerShape): for n in range(29 - firstLayerShape): if(codeParts[m,n]!=-1): partsPlot[codeParts[m,n]]+=ims[i,m:m+firstLayerShape,n:n+firstLayerShape] partsCodedNumber[codeParts[m,n]]+=1 for j in range(numParts): partsPlot[j] = partsPlot[j]/partsCodedNumber[j] secondLayerCodedNumber = 0 secondLayerShape = 12 frame = (secondLayerShape - firstLayerShape)/2 frame = int(frame) totalRange = 29 - firstLayerShape if 1: for i in range(trainingDataNum): codeParts = extractedFeature[i] for m in range(totalRange)[frame:totalRange - frame]: for n in range(totalRange)[frame:totalRange - frame]: if(codeParts[m,n]!=-1): imgRegion[codeParts[m,n]].append(ims[i, m - frame:m + secondLayerShape - frame,n - frame:n + secondLayerShape - frame]) secondLayerCodedNumber+=1 partsGrid = partsPool(codeParts[m-frame:m+frame + 1,n-frame:n+frame + 1],numParts) partsRegion[codeParts[m,n]].append(partsGrid) newPartsRegion = [] for i in range(numParts): newPartsRegion.append(np.asarray(partsRegion[i],dtype = np.uint8)) #np.save('/var/tmp/partsRegionOriginalJun29.npy',newPartsRegion) #np.save('/var/tmp/imgRegionOriginalJun29.npy',imgRegion) ##second-layer parts numSecondLayerParts = 10 allPartsLayer = [[pnet.PartsLayer(numSecondLayerParts,(1,1), settings=dict(outer_frame = 0, threshold = 5, sample_per_image = 1, max_samples=10000, min_prob = 0.005))] for i in range(numParts)] allPartsLayerImg = np.zeros((numParts,numSecondLayerParts,secondLayerShape,secondLayerShape)) allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts)) zeroParts = 0 imgRegionPool = [[] for i in range(numParts * numSecondLayerParts)] for i in range(numParts): if(not partsRegion[i]): continue allPartsLayer[i][0].train_from_samples(np.array(partsRegion[i]),None) extractedFeaturePart = 
extract(np.array(partsRegion[i],dtype = np.uint8),allPartsLayer[i])[0] #print(extractedFeaturePart.shape) for j in range(len(partsRegion[i])): if(extractedFeaturePart[j,0,0,0]!=-1): partIndex = extractedFeaturePart[j,0,0,0] allPartsLayerImg[i,partIndex]+=imgRegion[i][j] imgRegionPool[i * numSecondLayerParts + partIndex].append(imgRegion[i][j]) allPartsLayerImgNumber[i,partIndex]+=1 else: zeroParts+=1 for i in range(numParts): for j in range(numSecondLayerParts): if(allPartsLayerImgNumber[i,j]): allPartsLayerImg[i,j] = allPartsLayerImg[i,j]/allPartsLayerImgNumber[i,j] #np.save("exPartsOriginalJun29.npy",allPartsLayer) """ Visualize the SuperParts """ settings = {'interpolation':'nearest','cmap':plot.cm.gray,} settings['vmin'] = 0 settings['vmax'] = 1 plotData = np.ones(((2 + secondLayerShape)*100+2,(2+secondLayerShape)*(numSecondLayerParts + 1)+2))*0.8 visualShiftParts = 0 if 0: allPartsPlot = np.zeros((20,numSecondLayerParts + 1,12,12)) gr.images(partsPlot.reshape(numParts,6,6),zero_to_one=False,vmin = 0, vmax = 1) allPartsPlot[:,0] = 0.5 allPartsPlot[:,0,3:9,3:9] = partsPlot[20:40] allPartsPlot[:,1:,:,:] = allPartsLayerImg[20:40] gr.images(allPartsPlot.reshape(20 * (numSecondLayerParts + 1),12,12),zero_to_one=False, vmin = 0, vmax =1) elif 1: for i in range(numSecondLayerParts + 1): for j in range(100): if i == 0: plotData[5 + j * (2 + secondLayerShape):5+firstLayerShape + j * (2 + secondLayerShape), 5 + i * (2 + secondLayerShape): 5+firstLayerShape + i * (2 + secondLayerShape)] = partsPlot[j+visualShiftParts] else: plotData[2 + j * (2 + secondLayerShape):2 + secondLayerShape+ j * (2 + secondLayerShape),2 + i * (2 + secondLayerShape): 2+ secondLayerShape + i * (2 + secondLayerShape)] = allPartsLayerImg[j+visualShiftParts,i-1] plot.figure(figsize=(10,40)) plot.axis('off') plot.imshow(plotData, **settings) plot.savefig('originalExParts.pdf',format='pdf',dpi=900) else: pass """ Train A Class-Model Layer """ digits = range(10) sup_ims = [] sup_labels = [] classificationTrainingNum = 100 for d in digits: ims0 = ag.io.load_mnist('training', [d], selection = slice(classificationTrainingNum), return_labels = False) sup_ims.append(ims0) sup_labels.append(d * np.ones(len(ims0),dtype = np.int64)) sup_ims = np.concatenate(sup_ims, axis = 0) sup_labels = np.concatenate(sup_labels,axis = 0) curX = extract(sup_ims,allLayer[0:2])[0] #print(curX.shape) curX = curX.reshape(curX.shape[0:3]) secondLevelCurx = np.zeros((10 * classificationTrainingNum,29 - secondLayerShape,29 - secondLayerShape,1,1,numParts)) secondLevelCurxCenter = np.zeros((10 * classificationTrainingNum,29- secondLayerShape,29 - secondLayerShape)) #for i in range(10 * classificationTrainingNum): # codeParts = curX[i] for m in range(totalRange)[frame:totalRange-frame]: for n in range(totalRange)[frame:totalRange-frame]: secondLevelCurx[:,m-frame,n-frame] = index_map_pooling(curX[:,m-frame:m+frame+1,n-frame:n+frame+1],numParts,(2 * frame + 1,2 * frame + 1),(2 * frame + 1,2 * frame + 1)) secondLevelCurxCenter[:,m-frame,n-frame] = curX[:,m,n] thirdLevelCurx = np.zeros((10 * classificationTrainingNum, 29 - secondLayerShape,29 - secondLayerShape)) for i in range(int(10 * classificationTrainingNum)): for m in range(29 - secondLayerShape): for n in range(29 - secondLayerShape): if(secondLevelCurxCenter[i,m,n]!=-1): firstLevelPartIndex = secondLevelCurxCenter[i,m,n] #print(firstLevelPartIndex) firstLevelPartIndex = int(firstLevelPartIndex) extractedFeaturePart = extract(np.array(secondLevelCurx[i,m,n][np.newaxis,:],dtype = 
np.uint8),allPartsLayer[firstLevelPartIndex])[0] #print("secondLayerExtraction") #print(extractedFeaturePart.shape) thirdLevelCurx[i,m,n] = int(numSecondLayerParts * firstLevelPartIndex + extractedFeaturePart) #print(numSecondLayerParts,firstLevelPartIndex,extractedFeaturePart,thirdLevelCurx[i,m,n]) else: thirdLevelCurx[i,m,n] = -1 print(thirdLevelCurx.shape) #return thirdLevelCurx,allPartsLayerImg if 1: classificationLayers = [ pnet.PoolingLayer(shape = (4,4),strides = (4,4)), #pnet.MixtureClassificationLayer(n_components = 5, min_prob = 1e-7, block_size = 20) pnet.SVMClassificationLayer(C=1.0) ] classificationNet = pnet.PartsNet(classificationLayers) classificationNet.train((np.array(thirdLevelCurx[:,:,:,np.newaxis],dtype = np.int64),int(numParts * numSecondLayerParts)),sup_labels[:]) print("Training Success!!") if 1: testImg,testLabels = ag.io.load_mnist('testing') testingNum = testLabels.shape[0] print("training extract Begin") curTestX = extract(testImg, allLayer[0:2])[0] print("training extract End") curTestX = curTestX.reshape(curTestX.shape[0:3]) secondLevelCurTestX = np.zeros((testingNum, 29 - secondLayerShape,29 - secondLayerShape,1,1,numParts)) secondLevelCurTestXCenter = np.zeros((testingNum, 29 - secondLayerShape,29 - secondLayerShape)) import time start = time.time() #for i in range(testingNum): # codeParts = curTestX[i] for m in range(totalRange)[frame:totalRange - frame]: for n in range(totalRange)[frame:totalRange-frame]: secondLevelCurTestX[:,m-frame,n-frame] = index_map_pooling(curTestX[:,m-frame:m+frame + 1,n-frame:n+frame + 1],numParts,(2 * frame + 1,2 * frame + 1),(2 * frame + 1,2 * frame + 1)) secondLevelCurTestXCenter[:,m-frame,n-frame] = curTestX[:,m,n] afterPool = time.time() print(afterPool - start) thirdLevelCurTestX = np.zeros((testingNum, 29 - secondLayerShape, 29 - secondLayerShape)) featureMap = [[] for i in range(numParts)] for i in range(testingNum): for m in range(29 - secondLayerShape): for n in range(29 - secondLayerShape): if(secondLevelCurTestXCenter[i,m,n]!=-1): firstLevelPartIndex = int(secondLevelCurTestXCenter[i,m,n]) featureMap[firstLevelPartIndex].append(np.array(secondLevelCurTestX[i,m,n],dtype = np.uint8)) #extractedFeaturePart = extract(np.array(secondLevelCurTestX[i,m,n][np.newaxis,:],dtype = np.uint8),allPartsLayer[firstLevelPartIndex])[0] #thirdLevelCurTestX[i,m,n] = numSecondLayerParts * firstLevelPartIndex + extractedFeaturePart #else: #thirdLevelCurTestX[i,m,n] = -1 extractedFeatureMap = [Queue() for i in range(numParts)] for i in range(numParts): partFeatureMap = np.array(featureMap[i],dtype = np.uint8) allExtractedFeature = extract(np.array(partFeatureMap),allPartsLayer[i])[0] for feature in allExtractedFeature: extractedFeatureMap[i].put(feature) for i in range(testingNum): for m in range(29 - secondLayerShape): for n in range(29 - secondLayerShape): if(secondLevelCurTestXCenter[i,m,n]!=-1): firstLevelPartIndex = int(secondLevelCurTestXCenter[i,m,n]) if(extractedFeatureMap[firstLevelPartIndex].qsize()==0): print("something is wrong") extractedFeaturePart = -1 else: extractedFeaturePart = extractedFeatureMap[firstLevelPartIndex].get() thirdLevelCurTestX[i,m,n] = numSecondLayerParts * firstLevelPartIndex + extractedFeaturePart else: thirdLevelCurTestX[i,m,n] = -1 end = time.time() print(end-afterPool) print(thirdLevelCurTestX.shape) testImg_Input = np.array(thirdLevelCurTestX[:,:,:,np.newaxis],dtype = np.int64) testImg_batches = np.array_split(testImg_Input,200) testLabels_batches = np.array_split(testLabels, 200) args = [tup + 
(classificationNet,) for tup in zip(testImg_batches,testLabels_batches)] corrects = 0 total = 0 def format_error_rate(pr): return "{:.2f}%".format(100 * (1-pr)) print("Testing Starting...") for i, res in enumerate(pnet.parallel.starmap_unordered(test,args)): if i !=0 and i % 20 ==0: print("{0:05}/{1:05} Error rate: {2}".format(total, len(ims),format_error_rate(pr))) corrects += res.sum() total += res.size pr = corrects / total print("Final error rate:", format_error_rate(pr))
bsd-3-clause
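The `partsPool` helper in the script above builds a binary presence grid over a neighbourhood of coded parts, with -1 meaning "no part coded". A small vectorised NumPy sketch of the same pooling:

import numpy as np

def parts_pool(coded_region, num_parts):
    """Return a (1, 1, num_parts) binary grid marking which parts appear
    anywhere in the coded region; entries of -1 are treated as 'no part'."""
    grid = np.zeros((1, 1, num_parts))
    present = np.unique(coded_region)
    present = present[present >= 0]          # drop the -1 sentinel
    grid[0, 0, present] = 1.0
    return grid

region = np.array([[ 2, -1,  5],
                   [ 2,  7, -1],
                   [-1,  5,  7]])
print(parts_pool(region, num_parts=10)[0, 0])   # 1s at indices 2, 5 and 7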
BigTone2009/sms-tools
lectures/05-Sinusoidal-model/plots-code/spec-sine-synthesis-lobe.py
24
2626
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF

M = 256
N = 256
hN = N/2
hM = int(M/2.0)
fs = 44100
f0 = 5000.0
A0 = 1
ph = 1.5
t = np.arange(-hM,hM)/float(fs)
x = A0 * np.cos(2*np.pi*f0*t+ph)
w = hamming(M)
xw = x*w
fftbuffer = np.zeros(N)
fftbuffer[0:M] = xw
X = fft(fftbuffer)
mX = abs(X)
pX = np.angle(X[0:hN])
powerX = sum(2*mX[0:hN]**2)/N

mask = np.zeros(N/2)
mask[int(N*f0/fs-2*N/float(M)):int(N*f0/fs+3*N/float(M))] = 1.0
mY = mask*mX[0:hN]
powerY = sum(2*mY[0:hN]**2)/N
Y = np.zeros(N, dtype = complex)
Y[:hN] = mY * np.exp(1j*pX)
Y[hN+1:] = mY[:0:-1] * np.exp(-1j*pX[:0:-1])

y = ifft(Y)
SNR1 = -10*np.log10((powerX-powerY)/(powerX))

freqaxis = fs*np.arange(0,N/2)/float(N)
taxis = np.arange(N)/float(fs)

plt.figure(1, figsize=(9, 6))
plt.subplot(3,2,1)
plt.plot(20*np.log10(mY[:hN])-max(20*np.log10(mY[:hN])), 'r', lw=1.5)
plt.title ('mX, mY (main lobe); Hamming')
plt.plot(20*np.log10(mX[:hN])-max(20*np.log10(mX[:hN])), 'r', lw=1.5, alpha=.2)
plt.axis([0,hN,-120,0])

plt.subplot(3,2,3)
plt.plot(y[0:M], 'b', lw=1.5)
plt.axis([0,M,-1,1])
plt.title ('y (synthesis of main lobe)')

plt.subplot(3,2,5)
yerror = xw - y
plt.plot(yerror, 'k', lw=1.5)
plt.axis([0,M,-.003,.003])
plt.title ("error function: x-y; SNR = ${%d}$ dB" %(SNR1))

w = blackmanharris(M)
xw = x*w
fftbuffer = np.zeros(N)
fftbuffer[0:M] = xw
X = fft(fftbuffer)
mX = abs(X)
pX = np.angle(X[0:hN])
powerX = sum(2*mX[0:hN]**2)/N
mask = np.zeros(N/2)
mask[int(N*f0/fs-4*N/float(M)):int(N*f0/fs+5*N/float(M))] = 1.0
mY = mask*mX[0:hN]
powerY = sum(2*mY[0:hN]**2)/N
Y = np.zeros(N, dtype = complex)
Y[:hN] = mY * np.exp(1j*pX)
Y[hN+1:] = mY[:0:-1] * np.exp(-1j*pX[:0:-1])
y = ifft(Y)
SNR2 = -10*np.log10((powerX-powerY)/(powerX))

plt.subplot(3,2,2)
plt.plot(20*np.log10(mY[:hN])-max(20*np.log10(mY[:hN])), 'r', lw=1.5)
plt.title ('mX, mY (main lobe); Blackman Harris')
plt.plot(20*np.log10(mX[:hN])-max(20*np.log10(mX[:hN])), 'r', lw=1.5, alpha=.2)
plt.axis([0,hN,-120,0])

plt.subplot(3,2,4)
plt.plot(y[0:M], 'b', lw=1.5)
plt.axis([0,M,-1,1])
plt.title ('y (synthesis of main lobe)')

plt.subplot(3,2,6)
yerror2 = xw - y
plt.plot(yerror2, 'k', lw=1.5)
plt.axis([0,M,-.003,.003])
plt.title ("error function: x-y; SNR = ${%d}$ dB" %(SNR2))

plt.tight_layout()
plt.savefig('spec-sine-synthesis-lobe.png')
plt.show()
agpl-3.0
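The script above measures how well a windowed sinusoid is resynthesised from its spectral main lobe alone. A compact sketch of the same measurement for the Hamming case, computing the SNR from time-domain error energy instead of spectral power (the exact value depends on how many bins are kept around the peak; `scipy.signal.windows.hamming` is the modern import path):

import numpy as np
from numpy.fft import fft, ifft
from scipy.signal.windows import hamming

fs, f0, M, N = 44100, 5000.0, 256, 256
t = np.arange(-M // 2, M // 2) / fs
xw = np.cos(2 * np.pi * f0 * t + 1.5) * hamming(M)

X = fft(xw, N)
k0 = int(round(N * f0 / fs))                 # bin closest to the sinusoid
mask = np.zeros(N)
mask[k0 - 2:k0 + 3] = 1.0                    # keep the positive-frequency main lobe
mask[N - k0 - 2:N - k0 + 3] = 1.0            # ...and its negative-frequency image
y = np.real(ifft(X * mask))

err = xw - y
snr = 10 * np.log10(np.sum(xw ** 2) / np.sum(err ** 2))
print('SNR of main-lobe resynthesis: %.1f dB' % snr)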
jzt5132/scikit-learn
examples/classification/plot_classifier_comparison.py
66
4895
#!/usr/bin/python # -*- coding: utf-8 -*- """ ===================== Classifier comparison ===================== A comparison of a several classifiers in scikit-learn on synthetic datasets. The point of this example is to illustrate the nature of decision boundaries of different classifiers. This should be taken with a grain of salt, as the intuition conveyed by these examples does not necessarily carry over to real datasets. Particularly in high-dimensional spaces, data can more easily be separated linearly and the simplicity of classifiers such as naive Bayes and linear SVMs might lead to better generalization than is achieved by other classifiers. The plots show training points in solid colors and testing points semi-transparent. The lower right shows the classification accuracy on the test set. """ print(__doc__) # Code source: Gaël Varoquaux # Andreas Müller # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.cross_validation import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles, make_classification from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis h = .02 # step size in the mesh names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree", "Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis", "Quadratic Discriminant Analysis"] classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025), SVC(gamma=2, C=1), DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), AdaBoostClassifier(), GaussianNB(), LinearDiscriminantAnalysis(), QuadraticDiscriminantAnalysis()] X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1) rng = np.random.RandomState(2) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [make_moons(noise=0.3, random_state=0), make_circles(noise=0.2, factor=0.5, random_state=1), linearly_separable ] figure = plt.figure(figsize=(27, 9)) i = 1 # iterate over datasets for ds in datasets: # preprocess dataset, split into training and test part X, y = ds X = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4) x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # just plot the dataset first cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) # Plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # iterate over classifiers for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) # Plot the decision 
boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. if hasattr(clf, "decision_function"): Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) else: Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] # Put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) # Plot also the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) ax.set_title(name) ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right') i += 1 figure.subplots_adjust(left=.02, right=.98) plt.show()
bsd-3-clause
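The comparison script evaluates each classifier on a mesh grid to draw its decision boundary. The core of that recipe, reduced to one classifier on one synthetic dataset, and using the modern `sklearn.model_selection` import rather than the deprecated `cross_validation` module used above:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = make_moons(noise=0.3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4,
                                                    random_state=42)
clf = SVC(gamma=2, C=1).fit(X_train, y_train)

# evaluate the decision function on a dense grid covering the data
h = .02
xx, yy = np.meshgrid(np.arange(X[:, 0].min() - .5, X[:, 0].max() + .5, h),
                     np.arange(X[:, 1].min() - .5, X[:, 1].max() + .5, h))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=.8)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, edgecolors='k')
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, alpha=0.6, edgecolors='k')
plt.title('RBF SVM, test accuracy %.2f' % clf.score(X_test, y_test))
plt.show()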
WangWenjun559/Weiss
summary/sumy/sklearn/cluster/__init__.py
364
1228
""" The :mod:`sklearn.cluster` module gathers popular unsupervised clustering algorithms. """ from .spectral import spectral_clustering, SpectralClustering from .mean_shift_ import (mean_shift, MeanShift, estimate_bandwidth, get_bin_seeds) from .affinity_propagation_ import affinity_propagation, AffinityPropagation from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree, FeatureAgglomeration) from .k_means_ import k_means, KMeans, MiniBatchKMeans from .dbscan_ import dbscan, DBSCAN from .bicluster import SpectralBiclustering, SpectralCoclustering from .birch import Birch __all__ = ['AffinityPropagation', 'AgglomerativeClustering', 'Birch', 'DBSCAN', 'KMeans', 'FeatureAgglomeration', 'MeanShift', 'MiniBatchKMeans', 'SpectralClustering', 'affinity_propagation', 'dbscan', 'estimate_bandwidth', 'get_bin_seeds', 'k_means', 'linkage_tree', 'mean_shift', 'spectral_clustering', 'ward_tree', 'SpectralBiclustering', 'SpectralCoclustering']
apache-2.0
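Since the record above is only the package's public namespace, a quick usage sketch of one of the estimators it re-exports:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) + [0, 0],
               rng.randn(50, 2) + [5, 5]])     # two well-separated blobs

km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_)                      # roughly [0, 0] and [5, 5]
print(km.labels_[:5], km.labels_[-5:])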
pv/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <[email protected]> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
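The key convention in the example above is that unlabeled samples are marked with `-1` before fitting. A stripped-down sketch of the same semi-supervised setup using the modern import path `sklearn.semi_supervised.LabelSpreading`:

import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading

rng = np.random.RandomState(0)
iris = datasets.load_iris()
X, y = iris.data, iris.target.copy()

unlabeled = rng.rand(len(y)) < 0.7       # hide 70% of the labels
y[unlabeled] = -1                        # -1 marks "unlabeled" for LabelSpreading

model = LabelSpreading().fit(X, y)
pred = model.transduction_[unlabeled]    # labels inferred for the hidden points
acc = np.mean(pred == iris.target[unlabeled])
print('accuracy on points that were unlabeled during fitting: %.2f' % acc)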
bburan/psiexperiment
psi/data/io/__init__.py
1
7096
''' This module provides classes and functions that facilitate working with recordings created by psiexperiment. The base class of all experiments is `Recording`. Some subclasses (e.g., `psi.data.io.abr.ABRFile`) offer more specialized support for a particular experiment type. ''' import logging log = logging.getLogger(__name__) import functools from pathlib import Path import numpy as np import pandas as pd from scipy import signal def get_unique_columns(df, exclude=None): return [c for c in df if (len(df[c].unique()) > 1) and (c not in exclude)] class Recording: ''' Wrapper around a recording created by psiexperiment Parameters ---------- base_path : :obj:`str` or :obj:`pathlib.Path` Folder containing recordings Attributes ---------- base_path : pathlib.Path Folder containing recordings carray_names : set List of Bcolz carrays in this recording ctable_names : set List of Bcolz ctables in this recording ttable_names : set List of CSV-formatted tables in this recording The `__getattr__` method is implemented to allow accessing arrays and tables by name. For example, if you have a ctable called `erp_metadata`: recording = Recording(base_path) erp_md = recording.erp_metadata When using this approach, all tables are loaded into memory and returned as instances of `pandas.DataFrame`. All arrays are returned as `Signal` instances. Signal instances do not load the data into memory until the data is requested. ''' #: Mapping of names for CSV-formatted table to a list of columns that #: should be used as indices. For example: #: {'tone_sens': ['channel_name', 'frequency']} #: This attribute is typically used by subclasses to automate handling of #: loading tables into DataFrames. _ttable_indices = {} def __init__(self, base_path): bp = Path(base_path) self.base_path = bp self._refresh_names() def _refresh_names(self): ''' Utility function to refresh list of signals and tables This is primarily used by repair functions that may need to fix various items in the folder when the class is first created. 
''' bp = self.base_path self.carray_names = {d.parent.stem for d in bp.glob('*/meta')} self.ctable_names = {d.parent.parent.stem for d in bp.glob('*/*/meta')} self.ttable_names = {d.stem for d in bp.glob('*.csv')} def __getattr__(self, attr): if attr in self.carray_names: return self._load_bcolz_signal(attr) if attr in self.ctable_names: return self._load_bcolz_table(attr) elif attr in self.ttable_names: return self._load_text_table(attr) raise AttributeError def __repr__(self): lines = [f'Recording at {self.base_path.name} with:'] if self.carray_names: lines.append(f'* Bcolz carrays {self.carray_names}') if self.ctable_names: lines.append(f'* Bcolz ctables {self.ctable_names}') if self.ttable_names: lines.append(f'* CSV tables {self.ttable_names}') return '\n'.join(lines) @functools.lru_cache() def _load_bcolz_signal(self, name): from .bcolz_tools import BcolzSignal return BcolzSignal(self.base_path / name) @functools.lru_cache() def _load_bcolz_table(self, name): from .bcolz_tools import load_ctable_as_df return load_ctable_as_df(self.base_path / name) @functools.lru_cache() def _load_text_table(self, name): import pandas as pd path = (self.base_path / name).with_suffix('.csv') if path.stat().st_size == 0: return pd.DataFrame() index_col = self._ttable_indices.get(name, None) df = pd.read_csv(path, index_col=index_col) drop = [c for c in df.columns if c.startswith('Unnamed:')] return df.drop(columns=drop) class Signal: def get_epochs(self, md, offset, duration, detrend=None, columns='auto'): fn = self.get_segments return self._get_epochs(fn, md, offset, duration, detrend=detrend, columns=columns) def get_epochs_filtered(self, md, offset, duration, filter_lb, filter_ub, filter_order=1, detrend='constant', pad_duration=10e-3, columns='auto'): fn = self.get_segments_filtered return self._get_epochs(fn, md, offset, duration, filter_lb, filter_ub, filter_order, detrend, pad_duration, columns=columns) def _get_epochs(self, fn, md, *args, columns='auto', **kwargs): if columns == 'auto': columns = get_unique_columns(md, exclude=['t0']) t0 = md['t0'].values arrays = [md[c] for c in columns] arrays.append(t0) df = fn(t0, *args, **kwargs) df.index = pd.MultiIndex.from_arrays(arrays, names=columns + ['t0']) return df def get_segments(self, times, offset, duration, detrend=None): times = np.asarray(times) indices = np.round((times + offset) * self.fs).astype('i') samples = round(duration * self.fs) m = (indices >= 0) & ((indices + samples) < self.shape[-1]) if not m.all(): i = np.flatnonzero(~m) log.warn('Missing epochs %d', i) values = np.concatenate([self[i:i+samples][np.newaxis] \ for i in indices[m]]) if detrend is not None: values = signal.detrend(values, axis=-1, type=detrend) t = np.arange(samples)/self.fs + offset columns = pd.Index(t, name='time') index = pd.Index(times[m], name='t0') df = pd.DataFrame(values, index=index, columns=columns) return df.reindex(times) def _get_segments_filtered(self, fn, offset, duration, filter_lb, filter_ub, filter_order=1, detrend='constant', pad_duration=10e-3): Wn = (filter_lb/(0.5*self.fs), filter_ub/(0.5*self.fs)) b, a = signal.iirfilter(filter_order, Wn, btype='band', ftype='butter') df = fn(offset-pad_duration, duration+pad_duration, detrend) df[:] = signal.filtfilt(b, a, df.values, axis=-1) return df.loc[:, offset:offset+duration] def get_random_segments(self, n, offset, duration, detrend): t_min = -offset t_max = self.duration-duration-offset times = np.random.uniform(t_min, t_max, size=n) return self.get_segments(times, offset, duration, detrend) def 
get_segments_filtered(self, times, *args, **kwargs): fn = functools.partial(self.get_segments, times) return self._get_segments_filtered(fn, *args, **kwargs) def get_random_segments_filtered(self, n, *args, **kwargs): fn = functools.partial(self.get_random_segments, n) return self._get_segments_filtered(fn, *args, **kwargs)
mit
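The lazy `__getattr__` lookup above means signals and tables are only materialised on first access. A hypothetical usage sketch (the folder name and the `erp_metadata`/`eeg` names are placeholders and are not guaranteed to exist in any given recording; only the method signatures defined in the module above are assumed):

from pathlib import Path

from psi.data.io import Recording

# hypothetical recording folder; substitute a real psiexperiment output directory
recording = Recording(Path('~/data/example_recording').expanduser())
print(recording)                      # lists carrays, ctables and CSV tables found

# tables load lazily into pandas DataFrames on first attribute access
erp_md = recording.erp_metadata       # placeholder ctable name

# signals come back as Signal objects; epochs are cut relative to t0 times
epochs = recording.eeg.get_epochs_filtered(
    erp_md, offset=-1e-3, duration=10e-3,
    filter_lb=300, filter_ub=3000)
print(epochs.shape)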
pkainz/pylearn2
pylearn2/cross_validation/dataset_iterators.py
29
19389
""" Cross-validation dataset iterators. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" import numpy as np import warnings try: from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit, StratifiedShuffleSplit) except ImportError: warnings.warn("Could not import from sklearn.") from pylearn2.compat import OrderedDict from pylearn2.cross_validation.blocks import StackedBlocksCV from pylearn2.cross_validation.subset_iterators import ( ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit, StratifiedValidationShuffleSplit) from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix from pylearn2.datasets.transformer_dataset import TransformerDataset class DatasetCV(object): """ Construct a new DenseDesignMatrix for each subset. Parameters ---------- dataset : object Full dataset for use in cross validation. subset_iterator : iterable Iterable that returns (train, test) or (train, valid, test) indices for partitioning the dataset during cross-validation. preprocessor : Preprocessor or None Preprocessor to apply to child datasets. fit_preprocessor : bool Whether preprocessor can fit parameters when applied to training data. which_set : str, list or None If None, return all subset datasets. If one or more of 'train', 'valid', or 'test', return only the dataset(s) corresponding to the given subset(s). return_dict : bool Whether to return subset datasets as a dictionary. If True, returns a dict with keys 'train', 'valid', and/or 'test' (if subset_iterator returns two subsets per partition, 'train' and 'test' are used, and if subset_iterator returns three subsets per partition, 'train', 'valid', and 'test' are used). If False, returns a list of datasets matching the subset order given by subset_iterator. """ def __init__(self, dataset, subset_iterator, preprocessor=None, fit_preprocessor=False, which_set=None, return_dict=True): self.dataset = dataset self.subset_iterator = list(subset_iterator) # allow generator reuse dataset_iterator = dataset.iterator(mode='sequential', num_batches=1, data_specs=dataset.data_specs, return_tuple=True) self._data = dataset_iterator.next() self.preprocessor = preprocessor self.fit_preprocessor = fit_preprocessor self.which_set = which_set if which_set is not None: which_set = np.atleast_1d(which_set) assert len(which_set) for label in which_set: if label not in ['train', 'valid', 'test']: raise ValueError("Unrecognized subset '{}'".format(label)) self.which_set = which_set self.return_dict = return_dict def get_data_subsets(self): """ Partition the dataset according to cross-validation subsets and return the raw data in each subset. """ for subsets in self.subset_iterator: labels = None if len(subsets) == 3: labels = ['train', 'valid', 'test'] elif len(subsets) == 2: labels = ['train', 'test'] # data_subsets is an OrderedDict to maintain label order data_subsets = OrderedDict() for i, subset in enumerate(subsets): subset_data = tuple(data[subset] for data in self._data) if len(subset_data) == 2: X, y = subset_data else: X, = subset_data y = None data_subsets[labels[i]] = (X, y) yield data_subsets def __iter__(self): """ Create a DenseDesignMatrix for each dataset subset and apply any preprocessing to the child datasets. 
""" for data_subsets in self.get_data_subsets(): datasets = {} for label, data in data_subsets.items(): X, y = data datasets[label] = DenseDesignMatrix(X=X, y=y) # preprocessing if self.preprocessor is not None: self.preprocessor.apply(datasets['train'], can_fit=self.fit_preprocessor) for label, dataset in datasets.items(): if label == 'train': continue self.preprocessor.apply(dataset, can_fit=False) # which_set if self.which_set is not None: for label, dataset in list(datasets.items()): if label not in self.which_set: del datasets[label] del data_subsets[label] if not len(datasets): raise ValueError("No matching dataset(s) for " + "{}".format(self.which_set)) if not self.return_dict: # data_subsets is an OrderedDict to maintain label order datasets = list(datasets[label] for label in data_subsets.keys()) if len(datasets) == 1: datasets, = datasets yield datasets class StratifiedDatasetCV(DatasetCV): """ Subclass of DatasetCV for stratified experiments, where the relative class proportions of the full dataset are maintained in each partition. Parameters ---------- dataset : object Dataset to use in cross validation. subset_iterator : iterable Iterable that returns train/test or train/valid/test splits for partitioning the dataset during cross-validation. preprocessor : Preprocessor or None Preprocessor to apply to child datasets. fit_preprocessor : bool Whether preprocessor can fit parameters when applied to training data. which_set : str, list or None If None, return all subset datasets. If one or more of 'train', 'valid', or 'test', return only the dataset(s) corresponding to the given subset(s). return_dict : bool Whether to return subset datasets as a dictionary. If True, returns a dict with keys 'train', 'valid', and/or 'test' (if subset_iterator returns two subsets per partition, 'train' and 'test' are used, and if subset_iterator returns three subsets per partition, 'train', 'valid', and 'test' are used). If False, returns a list of datasets matching the subset order given by subset_iterator. """ @staticmethod def get_y(dataset): """ Stratified cross-validation requires label information for examples. This function gets target values for a dataset, converting from one-hot encoding to a 1D array as needed. Parameters ---------- dataset : object Dataset containing target values for examples. """ y = np.asarray(dataset.y) if y.ndim > 1: assert np.array_equal(np.unique(y), [0, 1]) y = np.argmax(y, axis=1) return y class TransformerDatasetCV(object): """ Cross-validation with dataset transformations. This class returns dataset subsets after transforming them with one or more pretrained models. Parameters ---------- dataset_iterator : DatasetCV Cross-validation dataset iterator providing train/test or train/valid/test datasets. transformers : Model or iterable Transformer model(s) to use for transforming datasets. """ def __init__(self, dataset_iterator, transformers): self.dataset_iterator = dataset_iterator self.transformers = transformers def __iter__(self): """ Construct a Transformer dataset for each partition. 
""" for k, datasets in enumerate(self.dataset_iterator): if isinstance(self.transformers, list): transformer = self.transformers[k] elif isinstance(self.transformers, StackedBlocksCV): transformer = self.transformers.select_fold(k) else: transformer = self.transformers if isinstance(datasets, list): for i, dataset in enumerate(datasets): datasets[i] = TransformerDataset(dataset, transformer) else: for key, dataset in datasets.items(): datasets[key] = TransformerDataset(dataset, transformer) yield datasets class DatasetKFold(DatasetCV): """ K-fold cross-validation. Parameters ---------- dataset : object Dataset to use for cross-validation. n_folds : int Number of cross-validation folds. shuffle : bool Whether to shuffle the dataset before partitioning. random_state : int or RandomState Random number generator used for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None, **kwargs): n = dataset.X.shape[0] cv = KFold(n, n_folds=n_folds, shuffle=shuffle, random_state=random_state) super(DatasetKFold, self).__init__(dataset, cv, **kwargs) class StratifiedDatasetKFold(StratifiedDatasetCV): """ Stratified K-fold cross-validation. Parameters ---------- dataset : object Dataset to use for cross-validation. n_folds : int Number of cross-validation folds. shuffle : bool Whether to shuffle the dataset before partitioning. random_state : int or RandomState Random number generator used for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None, **kwargs): y = self.get_y(dataset) try: cv = StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle, random_state=random_state) except TypeError: assert not shuffle and not random_state, ( "The 'shuffle' and 'random_state' arguments are not " + "supported by this version of sklearn. See " "http://scikit-learn.org/stable/developers/index.html" + "#git-repo for details on installing the development version.") cv = StratifiedKFold(y, n_folds=n_folds) super(StratifiedDatasetKFold, self).__init__(dataset, cv, **kwargs) class DatasetShuffleSplit(DatasetCV): """ Shuffle-split cross-validation. Parameters ---------- dataset : object Dataset to use for cross-validation. n_iter : int Number of shuffle-split iterations. test_size : float, int, or None If float, intepreted as the proportion of examples in the test set. If int, interpreted as the absolute number of examples in the test set. If None, adjusted to the complement of train_size. train_size : float, int, or None If float, intepreted as the proportion of examples in the training set. If int, interpreted as the absolute number of examples in the training set. If None, adjusted to the complement of test_size. random_state : int or RandomState Random number generator used for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None, random_state=None, **kwargs): n = dataset.X.shape[0] cv = ShuffleSplit(n, n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) super(DatasetShuffleSplit, self).__init__(dataset, cv, **kwargs) class StratifiedDatasetShuffleSplit(StratifiedDatasetCV): """ Stratified shuffle-split cross-validation. Parameters ---------- dataset : object Dataset to use for cross-validation. n_iter : int Number of shuffle-split iterations. test_size : float, int, or None If float, intepreted as the proportion of examples in the test set. 
If int, interpreted as the absolute number of examples in the test set. If None, adjusted to the complement of train_size. train_size : float, int, or None If float, intepreted as the proportion of examples in the training set. If int, interpreted as the absolute number of examples in the training set. If None, adjusted to the complement of test_size. random_state : int or RandomState Random number generator used for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None, random_state=None, **kwargs): y = self.get_y(dataset) cv = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) super(StratifiedDatasetShuffleSplit, self).__init__(dataset, cv, **kwargs) class DatasetValidationKFold(DatasetCV): """ K-fold cross-validation with train/valid/test subsets. Parameters ---------- dataset : object Dataset to use for cross-validation. n_folds : int Number of cross-validation folds. Must be at least 3. shuffle : bool Whether to shuffle the data before splitting. random_state : int, RandomState, or None Pseudorandom number seed or generator to use for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None, **kwargs): n = dataset.X.shape[0] cv = ValidationKFold(n, n_folds, shuffle, random_state) super(DatasetValidationKFold, self).__init__(dataset, cv, **kwargs) class StratifiedDatasetValidationKFold(StratifiedDatasetCV): """ Stratified K-fold cross-validation with train/valid/test subsets. Parameters ---------- dataset : object Dataset to use for cross-validation. n_folds : int Number of cross-validation folds. Must be at least 3. shuffle : bool Whether to shuffle the data before splitting. random_state : int, RandomState, or None Pseudorandom number seed or generator to use for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None, **kwargs): y = self.get_y(dataset) cv = StratifiedValidationKFold(y, n_folds, shuffle, random_state) super(StratifiedDatasetValidationKFold, self).__init__(dataset, cv, **kwargs) class DatasetValidationShuffleSplit(DatasetCV): """ Shuffle-split cross-validation with train/valid/test subsets. Parameters ---------- dataset : object Dataset to use for cross-validation. n_iter : int Number of shuffle/split iterations. test_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to the complement of train_size + valid_size. valid_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to match test_size. train_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to the complement of valid_size + test_size. random_state : int, RandomState, or None Pseudorandom number seed or generator to use for shuffling. kwargs : dict Keyword arguments for DatasetCV. 
""" def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None, train_size=None, random_state=None, **kwargs): n = dataset.X.shape[0] cv = ValidationShuffleSplit(n, n_iter, test_size, valid_size, train_size, random_state) super(DatasetValidationShuffleSplit, self).__init__(dataset, cv, **kwargs) class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV): """ Stratified shuffle-split cross-validation with train/valid/test subsets. Parameters ---------- dataset : object Dataset to use for cross-validation. n_iter : int Number of shuffle/split iterations. test_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to the complement of train_size + valid_size. valid_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to match test_size. train_size : float, int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the entire dataset to include in the validation split. If int, represents the absolute number of validation samples. If None, the value is automatically set to the complement of valid_size + test_size. random_state : int, RandomState, or None Pseudorandom number seed or generator to use for shuffling. kwargs : dict Keyword arguments for DatasetCV. """ def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None, train_size=None, random_state=None, **kwargs): y = self.get_y(dataset) cv = StratifiedValidationShuffleSplit(y, n_iter, test_size, valid_size, train_size, random_state) super(StratifiedDatasetValidationShuffleSplit, self).__init__(dataset, cv, **kwargs)
bsd-3-clause
BhavyaLight/kaggle-predicting-Red-Hat-Business-Value
Initial_Classification_Models/knn.py
2
6045
import numpy as np import pandas as pd import time import os import argparse from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import Normalizer from sklearn.decomposition import TruncatedSVD from sklearn.neighbors import KNeighborsClassifier # File name for data set, reduced TRAIN_FILE = 'act_train_features_reduced.csv' # File name for test data set, reduced TEST_FILE = 'act_test_features_reduced.csv' # output for train data set OUTPUT ='act_train_output.csv' # Path to the output file # Non feature NON_FEATURE=['activity_id','people_id','date','people_date'] # Categorical data that is only label encoded CATEGORICAL_DATA = ['people_char_1', 'people_char_2','people_group_1', 'people_char_3', 'people_char_4', 'people_char_5', 'people_char_6', 'people_char_7', 'people_char_8', 'people_char_9', 'activity_category', 'char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9', 'char_10'] # Already in a one-hot encoded form CATEGORICAL_BINARY = ['people_char_10', 'people_char_11', 'people_char_12', 'people_char_13', 'people_char_14', 'people_char_15', 'people_char_16', 'people_char_17', 'people_char_18', 'people_char_19', 'people_char_20', 'people_char_21', 'people_char_22', 'people_char_23', 'people_char_24', 'people_char_25', 'people_char_26', 'people_char_27', 'people_char_28', 'people_char_29', 'people_char_30', 'people_char_31', 'people_char_32', 'people_char_33', 'people_char_34', 'people_char_35', 'people_char_36', 'people_char_37' ] # Continuous categories CONT = ['people_days', 'days', 'people_month', 'month', 'people_quarter', 'quarter', 'people_week', 'week', 'people_dayOfMonth', 'dayOfMonth', 'people_year', 'year', 'people_char_38'] def get_file_path(directory, filename): """ Combines file path directory/filename """ return os.path.join(directory, filename) def category_to_one_hot(dataset, non_feature, continuous_feature): """ Uses scikit learn's one hot encoding to generate sparse matrix Note: Certain models might not have sparse matrix support, do check :param dataset: the data set to one hot encode :param non_feature: A list of columns in the data set that are not features :param continuous_feature: A list of columns in the data set that are features but continuous values :return: """ ds = dataset.drop(non_feature, axis=1) boolean_column = [] counter = 0 # Find the positional index of each categorical column for column in ds.columns: if column not in continuous_feature: boolean_column.append(counter) counter += 1 grd_enc = OneHotEncoder(categorical_features=boolean_column) encoded_arr = grd_enc.fit_transform(ds) return encoded_arr def normalize_matrix(arr): """ Function normalizes a matrix :param arr: A sparse matrix to normalize :return: A normalized sparse matrix """ norm = Normalizer(copy=False) norm.fit(arr) return norm.transform(arr) def write_out(df, output): """ Writes out the data frame to rhe output file """ df[['outcome', 'activity_id']].set_index('activity_id').drop('act_0').to_csv(output) def knn(data_directory, n_neighbours): # Start reading files start = time.time() # Read data frame file_path = get_file_path(data_directory, TRAIN_FILE) train_data_df = pd.read_csv(file_path, parse_dates=["date"]) train_data_df.sort_values(by=['activity_id'], ascending=True, inplace=True) # Read train output file_path = get_file_path(data_directory, OUTPUT) train_output = pd.read_csv(file_path) train_output.sort_values(by='activity_id', ascending=True, inplace=True) # End reading files end = time.time() print("Training files read. 
Time taken:"+str(end-start)) # Start one hot encoding start = time.time() train_enc = category_to_one_hot(train_data_df , NON_FEATURE, CONT) end = time.time() print("One hot encoded the values. Time taken: "+str(end-start)) # Delete redundant data del train_data_df # Normalize start = time.time() train_norm = normalize_matrix(train_enc) end = time.time() print("Normalized the matrix. Time taken: "+str(end-start)) # Fit knn model start = time.time() knn = KNeighborsClassifier(n_neighbors=n_neighbours) knn.fit(train_norm, train_output['output'].as_matrix()) print("Fit the knn model on train set. Time taken: "+str(end-start)) # Delete redundant data del train_output # Read test data frame file_path = get_file_path(data_directory, TEST_FILE) test_data_df = pd.read_csv(file_path, parse_dates=["date"]) test_data_df.sort_values(by=['activity_id'], ascending=True, inplace=True) # Start one hot encoding start = time.time() test_enc = category_to_one_hot(test_data_df , NON_FEATURE, CONT) end = time.time() print("One hot encoded the values. Time taken: "+str(end-start)) # Normalize start = time.time() test_norm = normalize_matrix(test_enc) end = time.time() print("Normalized the matrix. Time taken: "+str(end-start)) # predict y_pred = knn.predict_proba(test_norm) test_data_df['outcome'] = y_pred[:, 1] file_path = get_file_path(data_directory, "KNN_results.csv") write_out(test_data_df,file_path) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Determine and graph top 10 trending places') parser.add_argument('--data_directory', default=None, help='The directory pointing to the data') parser.add_argument('--K', type=int, default=5, help='Value of k for k-neighbours') knn(**parser.parse_args().__dict__)
mit
chrisjsewell/ipymd
ipymd/shared/__init__.py
1
1251
# shared resources import os import inspect import pandas as pd from six import string_types from .. import test_data from . import atomdata from . import transformations def get_data_path(data, check_exists=False, module=test_data): """return a directory path to data within a module data : str or list of str file name or list of sub-directories and file name (e.g. ['lammps','data.txt']) """ basepath = os.path.dirname(os.path.abspath(inspect.getfile(module))) if isinstance(data, string_types): data = [data] dirpath = os.path.join(basepath, *data) if check_exists: assert os.path.exists(dirpath), '{0} does not exist'.format(dirpath) return dirpath def atom_data(): """return a dataframe of atomic data """ path = get_data_path('element.txt',module=atomdata) df = pd.read_csv(path,comment='#') df.set_index('Symb',inplace=True) red = df.Red*255 green = df.Green*255 blue = df.Blue*255 df['color'] = zip(red.values.astype(int), green.values.astype(int), blue.values.astype(int)) df.drop(['Red','Green','Blue'],axis=1,inplace=True) return df
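# Hedged usage sketch (not part of the original module): get_data_path builds
# paths relative to an installed package rather than the current working
# directory, so bundled resources resolve regardless of where the caller runs.
# The file name below is only illustrative; any file known to sit inside the
# target package behaves the same way.
if __name__ == '__main__':
    # Resolve a file inside the bundled test_data package and assert that it
    # actually exists on disk.
    print(get_data_path('__init__.py', check_exists=True, module=test_data))

    # atom_data() resolves element.txt inside the atomdata sub-package in the
    # same fashion and returns a DataFrame of per-element properties indexed
    # by chemical symbol.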
gpl-3.0
fengzhyuan/scikit-learn
examples/cluster/plot_lena_segmentation.py
271
2444
""" ========================================= Segmenting the picture of Lena in regions ========================================= This example uses :ref:`spectral_clustering` on a graph created from voxel-to-voxel difference on an image to break this image into multiple partly-homogeneous regions. This procedure (spectral clustering on an image) is an efficient approximate solution for finding normalized graph cuts. There are two options to assign labels: * with 'kmeans' spectral clustering will cluster samples in the embedding space using a kmeans algorithm * whereas 'discrete' will iteratively search for the closest partition space to the embedding space. """ print(__doc__) # Author: Gael Varoquaux <[email protected]>, Brian Cheung # License: BSD 3 clause import time import numpy as np import scipy as sp import matplotlib.pyplot as plt from sklearn.feature_extraction import image from sklearn.cluster import spectral_clustering lena = sp.misc.lena() # Downsample the image by a factor of 4 lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2] lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2] # Convert the image into a graph with the value of the gradient on the # edges. graph = image.img_to_graph(lena) # Take a decreasing function of the gradient: an exponential # The smaller beta is, the more independent the segmentation is of the # actual image. For beta=1, the segmentation is close to a voronoi beta = 5 eps = 1e-6 graph.data = np.exp(-beta * graph.data / lena.std()) + eps # Apply spectral clustering (this step goes much faster if you have pyamg # installed) N_REGIONS = 11 ############################################################################### # Visualize the resulting regions for assign_labels in ('kmeans', 'discretize'): t0 = time.time() labels = spectral_clustering(graph, n_clusters=N_REGIONS, assign_labels=assign_labels, random_state=1) t1 = time.time() labels = labels.reshape(lena.shape) plt.figure(figsize=(5, 5)) plt.imshow(lena, cmap=plt.cm.gray) for l in range(N_REGIONS): plt.contour(labels == l, contours=1, colors=[plt.cm.spectral(l / float(N_REGIONS)), ]) plt.xticks(()) plt.yticks(()) plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))) plt.show()
bsd-3-clause
daemonmaker/pylearn2
pylearn2/utils/image.py
39
18841
""" Utility functions for working with images. """ import logging import numpy as np plt = None axes = None from theano.compat.six.moves import xrange from theano.compat.six import string_types import warnings try: import matplotlib.pyplot as plt import matplotlib.axes except (RuntimeError, ImportError, TypeError) as matplotlib_exception: warnings.warn("Unable to import matplotlib. Some features unavailable. " "Original exception: " + str(matplotlib_exception)) import os try: from PIL import Image except ImportError: Image = None from pylearn2.utils import string_utils as string from pylearn2.utils.exc import reraise_as from tempfile import mkstemp from multiprocessing import Process import subprocess logger = logging.getLogger(__name__) def ensure_Image(): """Makes sure Image has been imported from PIL""" global Image if Image is None: raise RuntimeError("You are trying to use PIL-dependent functionality" " but don't have PIL installed.") def imview(*args, **kwargs): """ A matplotlib-based image viewer command, wrapping `matplotlib.pyplot.imshow` but behaving more sensibly. Parameters ---------- figure : TODO TODO: write parameters section using decorators to inherit the matplotlib docstring Notes ----- Parameters are identical to `matplotlib.pyplot.imshow` but this behaves somewhat differently: * By default, it creates a new figure (unless a `figure` keyword argument is supplied. * It modifies the axes of that figure to use the full frame, without ticks or tick labels. * It turns on `nearest` interpolation by default (i.e., it does not antialias pixel data). This can be overridden with the `interpolation` argument as in `imshow`. All other arguments and keyword arguments are passed on to `imshow`.` """ if 'figure' not in kwargs: f = plt.figure() else: f = kwargs['figure'] new_ax = matplotlib.axes.Axes(f, [0, 0, 1, 1], xticks=[], yticks=[], frame_on=False) f.delaxes(f.gca()) f.add_axes(new_ax) if len(args) < 5 and 'interpolation' not in kwargs: kwargs['interpolation'] = 'nearest' plt.imshow(*args, **kwargs) def imview_async(*args, **kwargs): """ A version of `imview` that forks a separate process and immediately shows the image. Parameters ---------- window_title : str TODO: writeme with decorators to inherit the other imviews' docstrings Notes ----- Supports the `window_title` keyword argument to cope with the title always being 'Figure 1'. Returns the `multiprocessing.Process` handle. """ if 'figure' in kwargs: raise ValueError("passing a figure argument not supported") def fork_image_viewer(): f = plt.figure() kwargs['figure'] = f imview(*args, **kwargs) if 'window_title' in kwargs: f.set_window_title(kwargs['window_title']) plt.show() p = Process(None, fork_image_viewer) p.start() return p def show(image): """ .. todo:: WRITEME Parameters ---------- image : PIL Image object or ndarray If ndarray, integer formats are assumed to use 0-255 and float formats are assumed to use 0-1 """ viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}') if viewer_command == 'inline': return imview(image) if hasattr(image, '__array__'): # do some shape checking because PIL just raises a tuple indexing error # that doesn't make it very clear what the problem is if len(image.shape) < 2 or len(image.shape) > 3: raise ValueError('image must have either 2 or 3 dimensions but its' ' shape is ' + str(image.shape)) # The below is a temporary workaround that prevents us from crashing # 3rd party image viewers such as eog by writing out overly large # images. 
# In the long run we should determine if this is a bug in PIL when # producing # such images or a bug in eog and determine a proper fix. # Since this is hopefully just a short term workaround the # constants below are not included in the interface to the # function, so that 3rd party code won't start passing them. max_height = 4096 max_width = 4096 # Display separate warnings for each direction, since it's # common to crop only one. if image.shape[0] > max_height: image = image[0:max_height, :, :] warnings.warn("Cropping image to smaller height to avoid crashing " "the viewer program.") if image.shape[1] > max_width: image = image[:, 0:max_width, :] warnings.warn("Cropping the image to a smaller width to avoid " "crashing the viewer program.") # This ends the workaround if image.dtype == 'int8': image = np.cast['uint8'](image) elif str(image.dtype).startswith('float'): # don't use *=, we don't want to modify the input array image = image * 255. image = np.cast['uint8'](image) # PIL is too stupid to handle single-channel arrays if len(image.shape) == 3 and image.shape[2] == 1: image = image[:, :, 0] try: ensure_Image() image = Image.fromarray(image) except TypeError: reraise_as(TypeError("PIL issued TypeError on ndarray of shape " + str(image.shape) + " and dtype " + str(image.dtype))) # Create a temporary file with the suffix '.png'. fd, name = mkstemp(suffix='.png') os.close(fd) # Note: # Although we can use tempfile.NamedTemporaryFile() to create # a temporary file, the function should be used with care. # # In Python earlier than 2.7, a temporary file created by the # function will be deleted just after the file is closed. # We can re-use the name of the temporary file, but there is an # instant where a file with the name does not exist in the file # system before we re-use the name. This may cause a race # condition. # # In Python 2.7 or later, tempfile.NamedTemporaryFile() has # the 'delete' argument which can control whether a temporary # file will be automatically deleted or not. With the argument, # the above race condition can be avoided. # image.save(name) if os.name == 'nt': subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name, shell=True) else: subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name, shell=True) def pil_from_ndarray(ndarray): """ Converts an ndarray to a PIL image. Parameters ---------- ndarray : ndarray An ndarray containing an image. Returns ------- pil : PIL Image A PIL Image containing the image. """ try: if ndarray.dtype == 'float32' or ndarray.dtype == 'float64': assert ndarray.min() >= 0.0 assert ndarray.max() <= 1.0 ndarray = np.cast['uint8'](ndarray * 255) if len(ndarray.shape) == 3 and ndarray.shape[2] == 1: ndarray = ndarray[:, :, 0] ensure_Image() rval = Image.fromarray(ndarray) return rval except Exception as e: logger.exception('original exception: ') logger.exception(e) logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype)) logger.exception('ndarray.shape: {0}'.format(ndarray.shape)) raise assert False def ndarray_from_pil(pil, dtype='uint8'): """ Converts a PIL Image to an ndarray. Parameters ---------- pil : PIL Image An image represented as a PIL Image object dtype : str The dtype of ndarray to create Returns ------- ndarray : ndarray The image as an ndarray. """ rval = np.asarray(pil) if dtype != rval.dtype: rval = np.cast[dtype](rval) if str(dtype).startswith('float'): rval /= 255.
if len(rval.shape) == 2: rval = rval.reshape(rval.shape[0], rval.shape[1], 1) return rval def rescale(image, shape): """ Scales image to be no larger than shape. PIL might give you unexpected results beyond that. Parameters ---------- image : WRITEME shape : WRITEME Returns ------- WRITEME """ assert len(image.shape) == 3 # rows, cols, channels assert len(shape) == 2 # rows, cols i = pil_from_ndarray(image) ensure_Image() i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS) rval = ndarray_from_pil(i, dtype=image.dtype) return rval resize = rescale def fit_inside(image, shape): """ Scales image down to fit inside shape preserves proportions of image Parameters ---------- image : WRITEME shape : WRITEME Returns ------- WRITEME """ assert len(image.shape) == 3 # rows, cols, channels assert len(shape) == 2 # rows, cols if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]: return image.copy() row_ratio = float(image.shape[0]) / float(shape[0]) col_ratio = float(image.shape[1]) / float(shape[1]) if row_ratio > col_ratio: target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])] else: target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]] assert target_shape[0] <= shape[0] assert target_shape[1] <= shape[1] assert target_shape[0] == shape[0] or target_shape[1] == shape[1] rval = rescale(image, target_shape) return rval def letterbox(image, shape): """ Pads image with black letterboxing to bring image.shape up to shape Parameters ---------- image : WRITEME shape : WRITEME Returns ------- WRITEME """ assert len(image.shape) == 3 # rows, cols, channels assert len(shape) == 2 # rows, cols assert image.shape[0] <= shape[0] assert image.shape[1] <= shape[1] if image.shape[0] == shape[0] and image.shape[1] == shape[1]: return image.copy() rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype) rstart = (shape[0] - image.shape[0]) / 2 cstart = (shape[1] - image.shape[1]) / 2 rend = rstart + image.shape[0] cend = cstart + image.shape[1] rval[rstart:rend, cstart:cend] = image return rval def make_letterboxed_thumbnail(image, shape): """ Scales image down to shape. Preserves proportions of image, introduces black letterboxing if necessary. Parameters ---------- image : WRITEME shape : WRITEME Returns ------- WRITEME """ assert len(image.shape) == 3 assert len(shape) == 2 shrunk = fit_inside(image, shape) letterboxed = letterbox(shrunk, shape) return letterboxed def load(filepath, rescale_image=True, dtype='float64'): """ Load an image from a file. Parameters ---------- filepath : str Path to the image file to load rescale_image : bool Default value: True If True, returned images have pixel values in [0, 1]. Otherwise, values are in [0, 255]. dtype: str The dtype to use for the returned value Returns ------- img : numpy ndarray An array containing the image that was in the file. """ assert isinstance(filepath, string_types) if not rescale_image and dtype == 'uint8': ensure_Image() rval = np.asarray(Image.open(filepath)) assert rval.dtype == 'uint8' return rval s = 1.0 if rescale_image: s = 255. try: ensure_Image() rval = Image.open(filepath) except Exception: reraise_as(Exception("Could not open " + filepath)) numpy_rval = np.array(rval) msg = ("Tried to load an image, got an array with %d" " dimensions. Expected 2 or 3." "This may indicate a mildly corrupted image file. Try " "converting it to a different image format with a different " "editor like gimp or imagemagic. 
Sometimes these programs are " "more robust to minor corruption than PIL and will emit a " "correctly formatted image in the new format.") if numpy_rval.ndim not in [2, 3]: logger.error(dir(rval)) logger.error(rval) logger.error(rval.size) rval.show() raise AssertionError(msg % numpy_rval.ndim) rval = numpy_rval rval = np.cast[dtype](rval) / s if rval.ndim == 2: rval = rval.reshape(rval.shape[0], rval.shape[1], 1) if rval.ndim != 3: raise AssertionError("Something went wrong opening " + filepath + '. Resulting shape is ' + str(rval.shape) + " (it's meant to have 3 dimensions by now)") return rval def save(filepath, ndarray): """ Saves an image to a file. Parameters ---------- filepath : str The path to write the file to. ndarray : ndarray An array containing the image to be saved. """ pil_from_ndarray(ndarray).save(filepath) def scale_to_unit_interval(ndar, eps=1e-8): """ Scales all values in the ndarray ndar to be between 0 and 1 Parameters ---------- ndar : WRITEME eps : WRITEME Returns ------- WRITEME """ ndar = ndar.copy() ndar -= ndar.min() ndar *= 1.0 / (ndar.max() + eps) return ndar def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0), scale_rows_to_unit_interval=True, output_pixel_vals=True): """ Transform an array with one flattened image per row, into an array in which images are reshaped and layed out like tiles on a floor. This function is useful for visualizing datasets whose rows are images, and also columns of matrices for transforming those rows (such as the first layer of a neural net). Parameters ---------- x : numpy.ndarray 2-d ndarray or 4 tuple of 2-d ndarrays or None for channels, in which every row is a flattened image. shape : 2-tuple of ints The first component is the height of each image, the second component is the width. tile_shape : 2-tuple of ints The number of images to tile in (row, columns) form. scale_rows_to_unit_interval : bool Whether or not the values need to be before being plotted to [0, 1]. output_pixel_vals : bool Whether or not the output should be pixel values (int8) or floats. Returns ------- y : 2d-ndarray The return value has the same dtype as X, and is suitable for viewing as an image with PIL.Image.fromarray. """ assert len(img_shape) == 2 assert len(tile_shape) == 2 assert len(tile_spacing) == 2 # The expression below can be re-written in a more C style as # follows : # # out_shape = [0,0] # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] - # tile_spacing[0] # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] - # tile_spacing[1] out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)] if isinstance(X, tuple): assert len(X) == 4 # Create an output np ndarray to store the image if output_pixel_vals: out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8') else: out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype) # colors default to 0, alpha defaults to 1 (opaque) if output_pixel_vals: channel_defaults = [0, 0, 0, 255] else: channel_defaults = [0., 0., 0., 1.] 
for i in xrange(4): if X[i] is None: # if channel is None, fill it with zeros of the correct # dtype dt = out_array.dtype if output_pixel_vals: dt = 'uint8' out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \ channel_defaults[i] else: # use a recurrent call to compute the channel and store it # in the output out_array[:, :, i] = tile_raster_images( X[i], img_shape, tile_shape, tile_spacing, scale_rows_to_unit_interval, output_pixel_vals) return out_array else: # if we are dealing with only one channel H, W = img_shape Hs, Ws = tile_spacing # generate a matrix to store the output dt = X.dtype if output_pixel_vals: dt = 'uint8' out_array = np.zeros(out_shape, dtype=dt) for tile_row in xrange(tile_shape[0]): for tile_col in xrange(tile_shape[1]): if tile_row * tile_shape[1] + tile_col < X.shape[0]: this_x = X[tile_row * tile_shape[1] + tile_col] if scale_rows_to_unit_interval: # if we should scale values to be between 0 and 1 # do this by calling the `scale_to_unit_interval` # function this_img = scale_to_unit_interval( this_x.reshape(img_shape)) else: this_img = this_x.reshape(img_shape) # add the slice to the corresponding position in the # output array c = 1 if output_pixel_vals: c = 255 out_array[ tile_row * (H + Hs): tile_row * (H + Hs) + H, tile_col * (W + Ws): tile_col * (W + Ws) + W ] = this_img * c return out_array if __name__ == '__main__': black = np.zeros((50, 50, 3), dtype='uint8') red = black.copy() red[:, :, 0] = 255 green = black.copy() green[:, :, 1] = 255 show(black) show(green) show(red)
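# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): tile_raster_images
# lays out flattened images as a single mosaic that can then be written out
# with save(). The random data and the output file name below are purely
# illustrative; call the helper manually if you want to try it.
def _demo_tile_raster_images(out_path='/tmp/tiled_demo.png'):
    rng = np.random.RandomState(0)
    # 100 flattened 8x8 "images", one per row.
    flat_images = rng.rand(100, 64).astype('float32')
    mosaic = tile_raster_images(flat_images,
                                img_shape=(8, 8),
                                tile_shape=(10, 10),
                                tile_spacing=(1, 1),
                                scale_rows_to_unit_interval=True,
                                output_pixel_vals=True)
    # mosaic is a 2-D uint8 array; save() routes it through PIL.
    save(out_path, mosaic)
    return mosaic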
bsd-3-clause
thunderhoser/GewitterGefahr
gewittergefahr/dissertation/plot_gridrad_domains.py
1
16055
"""Plots GridRad domains. Specifically, plots number of convective days with GridRad data at each grid point. """ import os.path import argparse import numpy import matplotlib matplotlib.use('agg') from matplotlib import pyplot from mpl_toolkits.basemap import Basemap from gewittergefahr.gg_io import gridrad_io from gewittergefahr.gg_utils import grids from gewittergefahr.gg_utils import projections from gewittergefahr.gg_utils import radar_utils from gewittergefahr.gg_utils import time_conversion from gewittergefahr.gg_utils import time_periods from gewittergefahr.gg_utils import file_system_utils from gewittergefahr.plotting import plotting_utils TOLERANCE = 1e-6 SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n' TIME_INTERVAL_SEC = 300 OVERALL_MIN_LATITUDE_DEG = 20. OVERALL_MAX_LATITUDE_DEG = 55. OVERALL_MIN_LONGITUDE_DEG = 230. OVERALL_MAX_LONGITUDE_DEG = 300. LAMBERT_CONFORMAL_STRING = 'lcc' NUM_PARALLELS = 8 NUM_MERIDIANS = 6 RESOLUTION_STRING = 'l' BORDER_COLOUR = numpy.full(3, 0.) FIGURE_WIDTH_INCHES = 15 FIGURE_HEIGHT_INCHES = 15 FIGURE_RESOLUTION_DPI = 300 INPUT_DIR_ARG_NAME = 'input_gridrad_dir_name' FIRST_DATE_ARG_NAME = 'first_spc_date_string' LAST_DATE_ARG_NAME = 'last_spc_date_string' COLOUR_MAP_ARG_NAME = 'colour_map_name' GRID_SPACING_ARG_NAME = 'grid_spacing_metres' OUTPUT_FILE_ARG_NAME = 'output_file_name' INPUT_DIR_HELP_STRING = ( 'Name of top-level input directory. GridRad files therein will be found by' ' `gridrad_io.find_file` and read by ' '`gridrad_io.read_field_from_full_grid_file`.') SPC_DATE_HELP_STRING = ( 'SPC date or convective day (format "yyyymmdd"). This script will look for' ' GridRad files in the period `{0:s}`...`{1:s}`.' ).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME) COLOUR_MAP_HELP_STRING = ( 'Name of colour scheme for gridded plot (must be accepted by ' '`pyplot.get_cmap`).') GRID_SPACING_HELP_STRING = 'Spacing (metres) of Lambert conformal grid.' OUTPUT_FILE_HELP_STRING = 'Path to output file. Figure will be saved here.' INPUT_ARG_PARSER = argparse.ArgumentParser() INPUT_ARG_PARSER.add_argument( '--' + INPUT_DIR_ARG_NAME, type=str, required=True, help=INPUT_DIR_HELP_STRING) INPUT_ARG_PARSER.add_argument( '--' + FIRST_DATE_ARG_NAME, type=str, required=True, help=SPC_DATE_HELP_STRING) INPUT_ARG_PARSER.add_argument( '--' + LAST_DATE_ARG_NAME, type=str, required=True, help=SPC_DATE_HELP_STRING) INPUT_ARG_PARSER.add_argument( '--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='YlOrRd', help=COLOUR_MAP_HELP_STRING) INPUT_ARG_PARSER.add_argument( '--' + GRID_SPACING_ARG_NAME, type=float, required=False, default=1e5, help=GRID_SPACING_HELP_STRING) INPUT_ARG_PARSER.add_argument( '--' + OUTPUT_FILE_ARG_NAME, type=str, required=True, help=OUTPUT_FILE_HELP_STRING) def _get_domain_one_file(gridrad_file_name): """Returns spatial domain for one file. :param gridrad_file_name: Path to input file. :return: domain_limits_deg: length-4 numpy array with [min latitude, max latitude, min longitude, max longitude]. Latitudes are in deg N, and longitudes are in deg E. 
""" print('Reading metadata from: "{0:s}"...'.format(gridrad_file_name)) metadata_dict = gridrad_io.read_metadata_from_full_grid_file( gridrad_file_name) max_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] min_longitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN] longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN] num_rows = metadata_dict[radar_utils.NUM_LAT_COLUMN] num_columns = metadata_dict[radar_utils.NUM_LNG_COLUMN] min_latitude_deg = max_latitude_deg - (num_rows - 1) * latitude_spacing_deg max_longitude_deg = min_longitude_deg + ( (num_columns - 1) * longitude_spacing_deg ) return numpy.array([ min_latitude_deg, max_latitude_deg, min_longitude_deg, max_longitude_deg ]) def _get_lcc_params(projection_object): """Finds parameters for LCC (Lambert conformal conic) projection. :param projection_object: Instance of `pyproj.Proj`. :return: standard_latitudes_deg: length-2 numpy array of standard latitudes (deg N). :return: central_longitude_deg: Central longitude (deg E). :raises: ValueError: if projection is not LCC. """ projection_string = projection_object.srs words = projection_string.split() property_names = [w.split('=')[0][1:] for w in words] property_values = [w.split('=')[1] for w in words] projection_dict = dict(list( zip(property_names, property_values) )) if projection_dict['proj'] != LAMBERT_CONFORMAL_STRING: error_string = 'Grid projection should be "{0:s}", not "{1:s}".'.format( LAMBERT_CONFORMAL_STRING, projection_dict['proj'] ) raise ValueError(error_string) central_longitude_deg = float(projection_dict['lon_0']) standard_latitudes_deg = numpy.array([ float(projection_dict['lat_1']), float(projection_dict['lat_2']) ]) return standard_latitudes_deg, central_longitude_deg def _get_basemap(grid_metadata_dict): """Creates basemap. M = number of rows in grid M = number of columns in grid :param grid_metadata_dict: Dictionary created by `grids.create_equidistant_grid`. :return: basemap_object: Basemap handle (instance of `mpl_toolkits.basemap.Basemap`). :return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under Basemap projection (different than pyproj projection). :return: basemap_y_matrix_metres: Same but for y-coordinates. """ x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices( x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY], y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY] ) projection_object = grid_metadata_dict[grids.PROJECTION_KEY] latitude_matrix_deg, longitude_matrix_deg = ( projections.project_xy_to_latlng( x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres, projection_object=projection_object) ) standard_latitudes_deg, central_longitude_deg = _get_lcc_params( projection_object) basemap_object = Basemap( projection='lcc', lat_1=standard_latitudes_deg[0], lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg, rsphere=projections.DEFAULT_EARTH_RADIUS_METRES, ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING, llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0], urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1] ) basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object( longitude_matrix_deg, latitude_matrix_deg) return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres def _plot_data(num_days_matrix, grid_metadata_dict, colour_map_object): """Plots data. 
M = number of rows in grid N = number of columns in grid :param num_days_matrix: M-by-N numpy array with number of convective days for which grid cell is in domain. :param grid_metadata_dict: Dictionary created by `grids.create_equidistant_grid`. :param colour_map_object: See documentation at top of file. :return: figure_object: Figure handle (instance of `matplotlib.figure.Figure`). :return: axes_object: Axes handle (instance of `matplotlib.axes._subplots.AxesSubplot`). """ figure_object, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = ( _get_basemap(grid_metadata_dict) ) num_grid_rows = num_days_matrix.shape[0] num_grid_columns = num_days_matrix.shape[1] x_spacing_metres = ( (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) / (num_grid_columns - 1) ) y_spacing_metres = ( (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) / (num_grid_rows - 1) ) matrix_to_plot, edge_x_coords_metres, edge_y_coords_metres = ( grids.xy_field_grid_points_to_edges( field_matrix=num_days_matrix, x_min_metres=basemap_x_matrix_metres[0, 0], y_min_metres=basemap_y_matrix_metres[0, 0], x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres) ) matrix_to_plot = numpy.ma.masked_where(matrix_to_plot == 0, matrix_to_plot) plotting_utils.plot_coastlines( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_countries( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_states_and_provinces( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_parallels( basemap_object=basemap_object, axes_object=axes_object, num_parallels=NUM_PARALLELS) plotting_utils.plot_meridians( basemap_object=basemap_object, axes_object=axes_object, num_meridians=NUM_MERIDIANS) basemap_object.pcolormesh( edge_x_coords_metres, edge_y_coords_metres, matrix_to_plot, cmap=colour_map_object, vmin=1, vmax=numpy.max(num_days_matrix), shading='flat', edgecolors='None', axes=axes_object, zorder=-1e12) colour_bar_object = plotting_utils.plot_linear_colour_bar( axes_object_or_matrix=axes_object, data_matrix=num_days_matrix, colour_map_object=colour_map_object, min_value=1, max_value=numpy.max(num_days_matrix), orientation_string='horizontal', extend_min=False, extend_max=False, padding=0.05) tick_values = colour_bar_object.get_ticks() tick_strings = ['{0:d}'.format(int(numpy.round(v))) for v in tick_values] colour_bar_object.set_ticks(tick_values) colour_bar_object.set_ticklabels(tick_strings) axes_object.set_title('Number of convective days by grid cell') return figure_object, axes_object def _run(top_gridrad_dir_name, first_spc_date_string, last_spc_date_string, colour_map_name, grid_spacing_metres, output_file_name): """Plots GridRad domains. This is effectively the main method. :param top_gridrad_dir_name: See documentation at top of file. :param first_spc_date_string: Same. :param last_spc_date_string: Same. :param colour_map_name: Same. :param grid_spacing_metres: Same. :param output_file_name: Same. 
""" colour_map_object = pyplot.get_cmap(colour_map_name) file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name) first_time_unix_sec = time_conversion.get_start_of_spc_date( first_spc_date_string) last_time_unix_sec = time_conversion.get_end_of_spc_date( last_spc_date_string) valid_times_unix_sec = time_periods.range_and_interval_to_list( start_time_unix_sec=first_time_unix_sec, end_time_unix_sec=last_time_unix_sec, time_interval_sec=TIME_INTERVAL_SEC, include_endpoint=True) valid_spc_date_strings = [ time_conversion.time_to_spc_date_string(t) for t in valid_times_unix_sec ] domain_min_latitudes_deg = [] domain_max_latitudes_deg = [] domain_min_longitudes_deg = [] domain_max_longitudes_deg = [] prev_domain_limits_deg = numpy.full(4, numpy.nan) prev_spc_date_string = 'foo' num_times = len(valid_times_unix_sec) for i in range(num_times): this_gridrad_file_name = gridrad_io.find_file( unix_time_sec=valid_times_unix_sec[i], top_directory_name=top_gridrad_dir_name, raise_error_if_missing=False) if not os.path.isfile(this_gridrad_file_name): continue these_domain_limits_deg = _get_domain_one_file(this_gridrad_file_name) same_domain = ( valid_spc_date_strings[i] == prev_spc_date_string and numpy.allclose( these_domain_limits_deg, prev_domain_limits_deg, TOLERANCE ) ) if same_domain: continue prev_domain_limits_deg = these_domain_limits_deg + 0. prev_spc_date_string = valid_spc_date_strings[i] domain_min_latitudes_deg.append(these_domain_limits_deg[0]) domain_max_latitudes_deg.append(these_domain_limits_deg[1]) domain_min_longitudes_deg.append(these_domain_limits_deg[2]) domain_max_longitudes_deg.append(these_domain_limits_deg[3]) print(SEPARATOR_STRING) domain_min_latitudes_deg = numpy.array(domain_min_latitudes_deg) domain_max_latitudes_deg = numpy.array(domain_max_latitudes_deg) domain_min_longitudes_deg = numpy.array(domain_min_longitudes_deg) domain_max_longitudes_deg = numpy.array(domain_max_longitudes_deg) num_domains = len(domain_min_latitudes_deg) grid_metadata_dict = grids.create_equidistant_grid( min_latitude_deg=OVERALL_MIN_LATITUDE_DEG, max_latitude_deg=OVERALL_MAX_LATITUDE_DEG, min_longitude_deg=OVERALL_MIN_LONGITUDE_DEG, max_longitude_deg=OVERALL_MAX_LONGITUDE_DEG, x_spacing_metres=grid_spacing_metres, y_spacing_metres=grid_spacing_metres, azimuthal=False) unique_x_coords_metres = grid_metadata_dict[grids.X_COORDS_KEY] unique_y_coords_metres = grid_metadata_dict[grids.Y_COORDS_KEY] projection_object = grid_metadata_dict[grids.PROJECTION_KEY] x_coord_matrix_metres, y_coord_matrix_metres = grids.xy_vectors_to_matrices( x_unique_metres=unique_x_coords_metres, y_unique_metres=unique_y_coords_metres) latitude_matrix_deg, longitude_matrix_deg = ( projections.project_xy_to_latlng( x_coords_metres=x_coord_matrix_metres, y_coords_metres=y_coord_matrix_metres, projection_object=projection_object) ) num_grid_rows = latitude_matrix_deg.shape[0] num_grid_columns = latitude_matrix_deg.shape[1] num_days_matrix = numpy.full((num_grid_rows, num_grid_columns), 0) for i in range(num_domains): if numpy.mod(i, 10) == 0: print('Have found grid points in {0:d} of {1:d} domains...'.format( i, num_domains )) this_lat_flag_matrix = numpy.logical_and( latitude_matrix_deg >= domain_min_latitudes_deg[i], latitude_matrix_deg <= domain_max_latitudes_deg[i] ) this_lng_flag_matrix = numpy.logical_and( longitude_matrix_deg >= domain_min_longitudes_deg[i], longitude_matrix_deg <= domain_max_longitudes_deg[i] ) num_days_matrix += numpy.logical_and( this_lat_flag_matrix, this_lng_flag_matrix 
).astype(int) print(SEPARATOR_STRING) figure_object, axes_object = _plot_data( num_days_matrix=num_days_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=colour_map_object) plotting_utils.label_axes(axes_object=axes_object, label_string='(c)') print('Saving figure to: "{0:s}"...'.format(output_file_name)) figure_object.savefig( output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) if __name__ == '__main__': INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args() _run( top_gridrad_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME), first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME), last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME), colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME), grid_spacing_metres=getattr(INPUT_ARG_OBJECT, GRID_SPACING_ARG_NAME), output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME) )
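# -----------------------------------------------------------------------------
# Hedged illustration (not part of the original script): the core counting
# step in _run() is just a sum of boolean masks over an equidistant grid. The
# tiny latitude/longitude grid and the domain boxes below are made up purely
# to show that bookkeeping in isolation.
def _demo_count_days_per_cell():
    grid_lat_deg, grid_lng_deg = numpy.meshgrid(
        numpy.linspace(20., 55., 8), numpy.linspace(230., 300., 15),
        indexing='ij')

    # Each row is one convective day's domain:
    # [min lat, max lat, min lng, max lng].
    domain_limits_deg = numpy.array([
        [25., 40., 240., 270.],
        [30., 50., 250., 290.]
    ])

    num_days_matrix = numpy.zeros(grid_lat_deg.shape, dtype=int)

    for min_lat, max_lat, min_lng, max_lng in domain_limits_deg:
        lat_flags = numpy.logical_and(grid_lat_deg >= min_lat,
                                      grid_lat_deg <= max_lat)
        lng_flags = numpy.logical_and(grid_lng_deg >= min_lng,
                                      grid_lng_deg <= max_lng)
        num_days_matrix += numpy.logical_and(lat_flags, lng_flags).astype(int)

    return num_days_matrix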
mit
yuxng/Deep_ISM
ISM/lib/utils/voxelizer.py
1
7831
# -------------------------------------------------------- # FCN # Copyright (c) 2016 # Licensed under The MIT License [see LICENSE for details] # Written by Yu Xiang # -------------------------------------------------------- from ism.config import cfg import numpy as np class Voxelizer(object): def __init__(self, grid_size, num_classes): self.grid_size = grid_size self.num_classes = num_classes self.margin = 0.3 self.min_x = 0 self.min_y = 0 self.min_z = 0 self.max_x = 0 self.max_y = 0 self.max_z = 0 self.step_x = 0 self.step_y = 0 self.step_z = 0 self.voxelized = False self.height = 0 self.width = 0 def setup(self, min_x, min_y, min_z, max_x, max_y, max_z): self.min_x = min_x self.min_y = min_y self.min_z = min_z self.max_x = max_x self.max_y = max_y self.max_z = max_z # step size self.step_x = (max_x - min_x) / self.grid_size self.step_y = (max_y - min_y) / self.grid_size self.step_z = (max_z - min_z) / self.grid_size self.voxelized = True def draw(self, labels, colors, ax): for i in range(1, len(colors)): index = np.where(labels == i) X = index[0] * self.step_x + self.min_x Y = index[1] * self.step_y + self.min_y Z = index[2] * self.step_z + self.min_z ax.scatter(X, Y, Z, c=colors[i], marker='o') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') set_axes_equal(ax) def reset(self): self.min_x = 0 self.min_y = 0 self.min_z = 0 self.max_x = 0 self.max_y = 0 self.max_z = 0 self.step_x = 0 self.step_y = 0 self.step_z = 0 self.voxelized = False def voxelize(self, points): if not self.voxelized: # compute the boundary of the 3D points Xmin = np.nanmin(points[0,:]) - self.margin Xmax = np.nanmax(points[0,:]) + self.margin Ymin = np.nanmin(points[1,:]) - self.margin Ymax = np.nanmax(points[1,:]) + self.margin Zmin = np.nanmin(points[2,:]) - self.margin Zmax = np.nanmax(points[2,:]) + self.margin self.min_x = Xmin self.min_y = Ymin self.min_z = Zmin self.max_x = Xmax self.max_y = Ymax self.max_z = Zmax # step size self.step_x = (Xmax-Xmin) / self.grid_size self.step_y = (Ymax-Ymin) / self.grid_size self.step_z = (Zmax-Zmin) / self.grid_size self.voxelized = True # compute grid indexes indexes = np.zeros_like(points, dtype=np.float32) indexes[0,:] = np.floor((points[0,:] - self.min_x) / self.step_x) indexes[1,:] = np.floor((points[1,:] - self.min_y) / self.step_y) indexes[2,:] = np.floor((points[2,:] - self.min_z) / self.step_z) # crash the grid indexes # grid_indexes = indexes[0,:] * self.grid_size * self.grid_size + indexes[1,:] * self.grid_size + indexes[2,:] # I = np.isnan(grid_indexes) # grid_indexes[I] = -1 # grid_indexes = grid_indexes.reshape(self.height, self.width).astype(np.int32) return indexes # backproject pixels into 3D points def backproject(self, im_depth, meta_data): depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth'] # compute projection matrix P = meta_data['projection_matrix'] P = np.matrix(P) Pinv = np.linalg.pinv(P) # compute the 3D points height = depth.shape[0] width = depth.shape[1] self.height = height self.width = width # camera location C = meta_data['camera_location'] C = np.matrix(C).transpose() Cmat = np.tile(C, (1, width*height)) # construct the 2D points matrix x, y = np.meshgrid(np.arange(width), np.arange(height)) ones = np.ones((height, width), dtype=np.float32) x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3) # backprojection x3d = Pinv * x2d.transpose() x3d[0,:] = x3d[0,:] / x3d[3,:] x3d[1,:] = x3d[1,:] / x3d[3,:] x3d[2,:] = x3d[2,:] / x3d[3,:] x3d = x3d[:3,:] # compute the ray R = x3d - Cmat # compute the norm N = 
np.linalg.norm(R, axis=0) # normalization R = np.divide(R, np.tile(N, (3,1))) # compute the 3D points X = Cmat + np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R) # mask index = np.where(im_depth.flatten() == 0) X[:,index] = np.nan return np.array(X) # backproject pixels into 3D points in camera's coordinate system def backproject_camera(self, im_depth, meta_data): depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth'] # get intrinsic matrix K = meta_data['intrinsic_matrix'] K = np.matrix(K) Kinv = np.linalg.inv(K) if cfg.FLIP_X: Kinv[0, 0] = -1 * Kinv[0, 0] Kinv[0, 2] = -1 * Kinv[0, 2] # compute the 3D points width = depth.shape[1] height = depth.shape[0] # construct the 2D points matrix x, y = np.meshgrid(np.arange(width), np.arange(height)) ones = np.ones((height, width), dtype=np.float32) x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3) # backprojection R = Kinv * x2d.transpose() # compute the 3D points X = np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R) # mask index = np.where(im_depth.flatten() == 0) X[:,index] = np.nan return np.array(X) def check_points(self, points, pose): # transform the points R = pose[0:3, 0:3] T = pose[0:3, 3].reshape((3,1)) points = np.dot(R, points) + np.tile(T, (1, points.shape[1])) Xmin = np.nanmin(points[0,:]) Xmax = np.nanmax(points[0,:]) Ymin = np.nanmin(points[1,:]) Ymax = np.nanmax(points[1,:]) Zmin = np.nanmin(points[2,:]) Zmax = np.nanmax(points[2,:]) if Xmin >= self.min_x and Xmax <= self.max_x and Ymin >= self.min_y and Ymax <= self.max_y and Zmin >= self.min_z and Zmax <= self.max_z: return True else: print 'points x limit: {} {}'.format(Xmin, Xmax) print 'points y limit: {} {}'.format(Ymin, Ymax) print 'points z limit: {} {}'.format(Zmin, Zmax) return False def set_axes_equal(ax): '''Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D. Input ax: a matplotlib axis, e.g., as output from plt.gca(). ''' x_limits = ax.get_xlim3d() y_limits = ax.get_ylim3d() z_limits = ax.get_zlim3d() x_range = abs(x_limits[1] - x_limits[0]) x_middle = np.mean(x_limits) y_range = abs(y_limits[1] - y_limits[0]) y_middle = np.mean(y_limits) z_range = abs(z_limits[1] - z_limits[0]) z_middle = np.mean(z_limits) # The plot bounding box is a sphere in the sense of the infinity # norm, hence I call half the max range the plot radius. plot_radius = 0.5*max([x_range, y_range, z_range]) ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius]) ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius]) ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
mit
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/metrics/tests/test_metrics.py
1
104453
from __future__ import division, print_function import numpy as np from functools import partial from itertools import product import warnings from sklearn import datasets from sklearn import svm from sklearn import ensemble from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer from sklearn.datasets import make_multilabel_classification from sklearn.utils import check_random_state, shuffle from sklearn.utils.multiclass import unique_labels from sklearn.utils.fixes import np_version from sklearn.utils.multiclass import type_of_target from sklearn.utils.testing import (assert_true, assert_raises, assert_raise_message, assert_equal, assert_almost_equal, assert_not_equal, assert_array_equal, assert_array_almost_equal, assert_warns, assert_no_warnings, assert_greater, ignore_warnings, assert_warns_message) from sklearn.metrics import (accuracy_score, average_precision_score, auc, auc_score, classification_report, confusion_matrix, explained_variance_score, f1_score, fbeta_score, hamming_loss, hinge_loss, jaccard_similarity_score, log_loss, matthews_corrcoef, mean_squared_error, mean_absolute_error, precision_recall_curve, precision_recall_fscore_support, precision_score, recall_score, r2_score, roc_auc_score, roc_curve, zero_one_loss) from sklearn.metrics.metrics import _average_binary_score from sklearn.metrics.metrics import _check_clf_targets from sklearn.metrics.metrics import _check_reg_targets from sklearn.metrics.metrics import UndefinedMetricWarning from sklearn.externals.six.moves import xrange # Note toward developers about metric testing # ------------------------------------------- # It is often possible to write one general test for several metrics: # # - invariance properties, e.g. invariance to sample order # - common behavior for an argument, e.g. the "normalize" with value True # will return the mean of the metrics and with value False will return # the sum of the metrics. # # In order to improve the overall metric testing, it is a good idea to write # first a specific test for the given metric and then add a general test for # all metrics that have the same behavior. # # Two types of datastructures are used in order to implement this system: # dictionaries of metrics and lists of metrics wit common properties. # # Dictionaries of metrics # ------------------------ # The goal of having those dictionaries is to have an easy way to call a # particular metric and associate a name to each function: # # - REGRESSION_METRICS: all regression metrics. # - CLASSIFICATION_METRICS: all classification metrics # which compare a ground truth and the estimated targets as returned by a # classifier. # - THRESHOLDED_METRICS: all classification metrics which # compare a ground truth and a score, e.g. estimated probabilities or # decision function (format might vary) # # Those dictionaries will be used to test systematically some invariance # properties, e.g. invariance toward several input layout. 
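#
# For instance, once those structures exist, a symmetry check can be written a
# single time and applied to every registered metric (sketched here as a
# comment only, not as an executable test):
#
#   for name in SYMMETRIC_METRICS:
#       metric = ALL_METRICS[name]
#       assert_almost_equal(metric(y_true, y_pred), metric(y_pred, y_true))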
# REGRESSION_METRICS = { "mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "explained_variance_score": explained_variance_score, "r2_score": r2_score, } CLASSIFICATION_METRICS = { "accuracy_score": accuracy_score, "unnormalized_accuracy_score": partial(accuracy_score, normalize=False), "confusion_matrix": confusion_matrix, "hamming_loss": hamming_loss, "jaccard_similarity_score": jaccard_similarity_score, "unnormalized_jaccard_similarity_score": partial(jaccard_similarity_score, normalize=False), "zero_one_loss": zero_one_loss, "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False), "precision_score": precision_score, "recall_score": recall_score, "f1_score": f1_score, "f2_score": partial(fbeta_score, beta=2), "f0.5_score": partial(fbeta_score, beta=0.5), "matthews_corrcoef_score": matthews_corrcoef, "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5), "weighted_f1_score": partial(f1_score, average="weighted"), "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2), "weighted_precision_score": partial(precision_score, average="weighted"), "weighted_recall_score": partial(recall_score, average="weighted"), "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5), "micro_f1_score": partial(f1_score, average="micro"), "micro_f2_score": partial(fbeta_score, average="micro", beta=2), "micro_precision_score": partial(precision_score, average="micro"), "micro_recall_score": partial(recall_score, average="micro"), "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5), "macro_f1_score": partial(f1_score, average="macro"), "macro_f2_score": partial(fbeta_score, average="macro", beta=2), "macro_precision_score": partial(precision_score, average="macro"), "macro_recall_score": partial(recall_score, average="macro"), "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5), "samples_f1_score": partial(f1_score, average="samples"), "samples_f2_score": partial(fbeta_score, average="samples", beta=2), "samples_precision_score": partial(precision_score, average="samples"), "samples_recall_score": partial(recall_score, average="samples"), } THRESHOLDED_METRICS = { "log_loss": log_loss, "hinge_loss": hinge_loss, "roc_auc_score": roc_auc_score, "weighted_roc_auc": partial(roc_auc_score, average="weighted"), "samples_roc_auc": partial(roc_auc_score, average="samples"), "micro_roc_auc": partial(roc_auc_score, average="micro"), "macro_roc_auc": partial(roc_auc_score, average="macro"), "average_precision_score": average_precision_score, "weighted_average_precision_score": partial(average_precision_score, average="weighted"), "samples_average_precision_score": partial(average_precision_score, average="samples"), "micro_average_precision_score": partial(average_precision_score, average="micro"), "macro_average_precision_score": partial(average_precision_score, average="macro"), } ALL_METRICS = dict() ALL_METRICS.update(THRESHOLDED_METRICS) ALL_METRICS.update(CLASSIFICATION_METRICS) ALL_METRICS.update(REGRESSION_METRICS) # Lists of metrics with common properties # --------------------------------------- # Lists of metrics with common properties are used to test systematically some # functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that # are symmetric with respect to their input argument y_true and y_pred. # # When you add a new metric or functionality, check if a general test # is already written. 
# Metric undefined with "binary" or "multiclass" input METRIC_UNDEFINED_MULTICLASS = [ "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", # Those metrics don't support multiclass outputs "average_precision_score", "weighted_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", "samples_average_precision_score", "roc_auc_score", "micro_roc_auc", "weighted_roc_auc", "macro_roc_auc", "samples_roc_auc", ] # Metrics with an "average" argument METRICS_WITH_AVERAGING = [ "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score" ] # Treshold-based metrics with an "average" argument THRESHOLDED_METRICS_WITH_AVERAGING = [ "roc_auc_score", "average_precision_score", ] # Metrics with a "pos_label" argument METRICS_WITH_POS_LABEL = [ "roc_curve", "hinge_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", ] # Metrics with a "labels" argument METRICS_WITH_LABELS = [ "confusion_matrix", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", ] # Metrics with a "normalize" option METRICS_WITH_NORMALIZE_OPTION = [ "accuracy_score", "jaccard_similarity_score", "zero_one_loss", ] # Threshold-based metrics with "multilabel-indicator" format support THRESHOLDED_MULTILABEL_METRICS = [ "log_loss", "roc_auc_score", "weighted_roc_auc", "samples_roc_auc", "micro_roc_auc", "macro_roc_auc", "average_precision_score", "weighted_average_precision_score", "samples_average_precision_score", "micro_average_precision_score", "macro_average_precision_score", ] # Classification metrics with "multilabel-indicator" and # "multilabel-sequence" format support MULTILABELS_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f1_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f1_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "samples_f0.5_score", "samples_f1_score", "samples_f2_score", "samples_precision_score", "samples_recall_score", ] # Regression metrics with "multioutput-continuous" format support MULTIOUTPUT_METRICS = [ "mean_absolute_error", "mean_squared_error", "r2_score", ] # Symmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) == metric(y_pred, y_true). 
SYMMETRIC_METRICS = [ "accuracy_score", "unnormalized_accuracy_score", "hamming_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "zero_one_loss", "unnormalized_zero_one_loss", "f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score", "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error" ] # Asymmetric with respect to their input arguments y_true and y_pred # metric(y_true, y_pred) != metric(y_pred, y_true). NOT_SYMMETRIC_METRICS = [ "explained_variance_score", "r2_score", "confusion_matrix", "precision_score", "recall_score", "f2_score", "f0.5_score", "weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score", "weighted_recall_score", "micro_f0.5_score", "micro_f2_score", "micro_precision_score", "micro_recall_score", "macro_f0.5_score", "macro_f2_score", "macro_precision_score", "macro_recall_score", "log_loss", "hinge_loss" ] # No Sample weight support METRICS_WITHOUT_SAMPLE_WEIGHT = [ "confusion_matrix", "hamming_loss", "hinge_loss", "jaccard_similarity_score", "unnormalized_jaccard_similarity_score", "log_loss", "matthews_corrcoef_score", ] ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred def _auc(y_true, y_score): """Alternative implementation to check for correctness of `roc_auc_score`.""" pos_label = np.unique(y_true)[1] # Count the number of times positive samples are correctly ranked above # negative samples. pos = y_score[y_true == pos_label] neg = y_score[y_true != pos_label] diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) n_correct = np.sum(diff_matrix > 0) return n_correct / float(len(pos) * len(neg)) ############################################################################### # Tests def _average_precision(y_true, y_score): """Alternative implementation to check for correctness of `average_precision_score`.""" pos_label = np.unique(y_true)[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_score = y_score[order] y_true = y_true[order] score = 0 for i in xrange(len(y_score)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. 
            prec = 0
            for j in xrange(0, i + 1):
                if y_true[j] == pos_label:
                    prec += 1.0
            prec /= (i + 1.0)
            score += prec

    return score / n_pos


def test_roc_curve():
    """Test Area under Receiver Operating Characteristic (ROC) curve"""
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    expected_auc = _auc(y_true, probas_pred)
    assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
    assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
    assert_almost_equal(roc_auc,
                        ignore_warnings(auc_score)(y_true, probas_pred))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)


def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
    rng = np.random.RandomState(0)
    y_true = np.array([0] * 50 + [1] * 50)
    y_pred = rng.randint(3, size=100)
    fpr, tpr, thr = roc_curve(y_true, y_pred)
    assert_equal(fpr[0], 0)
    assert_equal(fpr[-1], 1)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thr.shape)


def test_roc_returns_consistency():
    """Test whether the returned threshold matches up with tpr"""
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to determine the tpr
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)


def test_roc_nonrepeating_thresholds():
    """Test to ensure that we don't return spurious repeating thresholds.

    Duplicated thresholds can arise due to machine precision issues.
    """
    dataset = datasets.load_digits()
    X = dataset['data']
    y = dataset['target']

    # This random forest classifier can only return probabilities
    # significant to two decimal places
    clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)

    # How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities train, test = slice(None, None, 2), slice(1, None, 2) probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test]) y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here y_true = [yy < 5 for yy in y[test]] # Check for repeating values in the thresholds fpr, tpr, thresholds = roc_curve(y_true, y_score) assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size) def test_roc_curve_multi(): """roc_curve not applicable for multi-class problems""" y_true, _, probas_pred = make_prediction(binary=False) assert_raises(ValueError, roc_curve, y_true, probas_pred) def test_roc_curve_confidence(): """roc_curve for confidence scores""" y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.90, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_hard(): """roc_curve for hard decisions""" y_true, pred, probas_pred = make_prediction(binary=True) # always predict one trivial_pred = np.ones(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # always predict zero trivial_pred = np.zeros(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # hard decisions fpr, tpr, thresholds = roc_curve(y_true, pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.78, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_one_label(): y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # assert there are warnings w = UndefinedMetricWarning fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred) # all true labels, all fpr should be nan assert_array_equal(fpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # assert there are warnings fpr, tpr, thresholds = assert_warns(w, roc_curve, [1 - x for x in y_true], y_pred) # all negative labels, all tpr should be nan assert_array_equal(tpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_toydata(): # Binary classification y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) 
y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, .5) y_true = [0, 0] y_score = [0.25, 0.75] tpr, fpr, _ = roc_curve(y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [0., 0.5, 1.]) assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) y_true = [1, 1] y_score = [0.25, 0.75] tpr, fpr, _ = roc_curve(y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [np.nan, np.nan]) assert_array_almost_equal(fpr, [0.5, 1.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.) y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5) def test_auc(): """Test Area Under Curve (AUC) computation""" x = [0, 1] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0, 0] y = [0, 1, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [0, 1] y = [1, 1] assert_array_almost_equal(auc(x, y), 1) x = [0, 0.5, 1] y = [0, 0.5, 1] assert_array_almost_equal(auc(x, y), 0.5) def test_auc_duplicate_values(): # Test Area Under Curve (AUC) computation with duplicate values # auc() was previously sorting the x and y arrays according to the indices # from numpy.argsort(x), which was reordering the tied 0's in this example # and resulting in an incorrect area computation. This test detects the # error. 
x = [-2.0, 0.0, 0.0, 0.0, 1.0] y1 = [2.0, 0.0, 0.5, 1.0, 1.0] y2 = [2.0, 1.0, 0.0, 0.5, 1.0] y3 = [2.0, 1.0, 0.5, 0.0, 1.0] for y in (y1, y2, y3): assert_array_almost_equal(auc(x, y, reorder=True), 3.0) def test_auc_errors(): # Incompatible shapes assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2]) # Too few x values assert_raises(ValueError, auc, [0.0], [0.1]) # x is not in order assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0]) def test_auc_score_non_binary_class(): """Test that roc_auc_score function returns an error when trying to compute AUC for non-binary class values. """ rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) with warnings.catch_warnings(record=True): rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) def test_precision_recall_f1_score_binary(): """Test Precision Recall and F1 Score for binary classification task""" y_true, y_pred, _ = make_prediction(binary=True) # detailed measures for each class p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.73, 0.85], 2) assert_array_almost_equal(r, [0.88, 0.68], 2) assert_array_almost_equal(f, [0.80, 0.76], 2) assert_array_equal(s, [25, 25]) # individual scoring function that can be used for grid search: in the # binary class case the score is the value of the measure for the positive # class (e.g. 
    # label == 1)
    ps = precision_score(y_true, y_pred)
    assert_array_almost_equal(ps, 0.85, 2)

    rs = recall_score(y_true, y_pred)
    assert_array_almost_equal(rs, 0.68, 2)

    fs = f1_score(y_true, y_pred)
    assert_array_almost_equal(fs, 0.76, 2)

    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2),
                        (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)


@ignore_warnings
def test_precision_recall_f_binary_single_class():
    """Test how precision, recall and F1 score behave with a single positive
    or negative class.

    Such a case may occur with non-stratified cross-validation"""
    assert_equal(1., precision_score([1, 1], [1, 1]))
    assert_equal(1., recall_score([1, 1], [1, 1]))
    assert_equal(1., f1_score([1, 1], [1, 1]))

    assert_equal(0., precision_score([-1, -1], [-1, -1]))
    assert_equal(0., recall_score([-1, -1], [-1, -1]))
    assert_equal(0., f1_score([-1, -1], [-1, -1]))


def test_average_precision_score_score_non_binary_class():
    """Test that average_precision_score function returns an error when
    trying to compute average_precision_score for a multiclass task.
    """
    rng = check_random_state(404)
    y_pred = rng.rand(10)

    # y_true contains three different class values
    y_true = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, y_true, y_pred)


def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require a different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
    # test statistic, so the average_precision_score should be 1
    y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(y_true, y_score), 1)


def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
    # values have the same score (0.5) and so the first two values
    # could be swapped around, creating an imperfect sorting. This
    # imperfection should come through in the end score, making it less
    # than one.
    y_true = [0, 1, 1]
    y_score = [.5, .5, .6]
    assert_not_equal(average_precision_score(y_true, y_score), 1.)
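
# A hedged worked example, added for illustration only (not part of the
# original suite; the helper name ``_tied_scores_sketch`` is made up).  With
# the tied scores used in the test above, the lowest threshold (0.5) predicts
# all three samples positive, so the precision at full recall is only 2/3,
# which is why the average precision cannot reach 1.


def _tied_scores_sketch():
    y_true = np.array([0, 1, 1])
    y_score = np.array([.5, .5, .6])
    precision, recall, _ = precision_recall_curve(y_true, y_score)
    # The first point of the curve corresponds to the lowest threshold (0.5):
    # three predicted positives, two of them correct.
    assert_almost_equal(precision[0], 2. / 3)
    assert_almost_equal(recall[0], 1.)
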
def test_precision_recall_fscore_support_errors(): y_true, y_pred, _ = make_prediction(binary=True) # Bad beta assert_raises(ValueError, precision_recall_fscore_support, y_true, y_pred, beta=0.0) # Bad pos_label assert_raises(ValueError, precision_recall_fscore_support, y_true, y_pred, pos_label=2, average='macro') # Bad average option assert_raises(ValueError, precision_recall_fscore_support, [0, 1, 2], [1, 2, 0], average='mega') def test_confusion_matrix_binary(): """Test confusion matrix - binary classification case""" y_true, y_pred, _ = make_prediction(binary=True) def test(y_true, y_pred): cm = confusion_matrix(y_true, y_pred) assert_array_equal(cm, [[22, 3], [8, 17]]) tp, fp, fn, tn = cm.flatten() num = (tp * tn - fp * fn) den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) true_mcc = 0 if den == 0 else num / den mcc = matthews_corrcoef(y_true, y_pred) assert_array_almost_equal(mcc, true_mcc, decimal=2) assert_array_almost_equal(mcc, 0.57, decimal=2) test(y_true, y_pred) test([str(y) for y in y_true], [str(y) for y in y_pred]) @ignore_warnings def test_matthews_corrcoef_nan(): assert_equal(matthews_corrcoef([0], [1]), 0.0) assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0) def test_precision_recall_f1_score_multiclass(): """Test Precision Recall and F1 Score for multiclass classification task""" y_true, y_pred, _ = make_prediction(binary=False) # compute scores with default labels introspection p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2) assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2) assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2) assert_array_equal(s, [24, 31, 20]) # averaging tests ps = precision_score(y_true, y_pred, pos_label=1, average='micro') assert_array_almost_equal(ps, 0.53, 2) rs = recall_score(y_true, y_pred, average='micro') assert_array_almost_equal(rs, 0.53, 2) fs = f1_score(y_true, y_pred, average='micro') assert_array_almost_equal(fs, 0.53, 2) ps = precision_score(y_true, y_pred, average='macro') assert_array_almost_equal(ps, 0.53, 2) rs = recall_score(y_true, y_pred, average='macro') assert_array_almost_equal(rs, 0.60, 2) fs = f1_score(y_true, y_pred, average='macro') assert_array_almost_equal(fs, 0.51, 2) ps = precision_score(y_true, y_pred, average='weighted') assert_array_almost_equal(ps, 0.51, 2) rs = recall_score(y_true, y_pred, average='weighted') assert_array_almost_equal(rs, 0.53, 2) fs = f1_score(y_true, y_pred, average='weighted') assert_array_almost_equal(fs, 0.47, 2) assert_raises(ValueError, precision_score, y_true, y_pred, average="samples") assert_raises(ValueError, recall_score, y_true, y_pred, average="samples") assert_raises(ValueError, f1_score, y_true, y_pred, average="samples") assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples", beta=0.5) # same prediction but with and explicit label ordering p, r, f, s = precision_recall_fscore_support( y_true, y_pred, labels=[0, 2, 1], average=None) assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2) assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2) assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2) assert_array_equal(s, [24, 20, 31]) def test_precision_recall_f1_score_multiclass_pos_label_none(): """Test Precision Recall and F1 Score for multiclass classification task GH Issue #1296 """ # initialize data y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1]) y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1]) # compute scores with default labels introspection 
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, pos_label=None, average='weighted') def test_zero_precision_recall(): """Check that pathological cases do not bring NaNs""" old_error_settings = np.seterr(all='raise') try: y_true = np.array([0, 1, 2, 0, 1, 2]) y_pred = np.array([2, 0, 1, 1, 2, 0]) assert_almost_equal(precision_score(y_true, y_pred, average='weighted'), 0.0, 2) assert_almost_equal(recall_score(y_true, y_pred, average='weighted'), 0.0, 2) assert_almost_equal(f1_score(y_true, y_pred, average='weighted'), 0.0, 2) finally: np.seterr(**old_error_settings) def test_confusion_matrix_multiclass(): """Test confusion matrix - multi-class case""" y_true, y_pred, _ = make_prediction(binary=False) def test(y_true, y_pred, string_type=False): # compute confusion matrix with default labels introspection cm = confusion_matrix(y_true, y_pred) assert_array_equal(cm, [[19, 4, 1], [4, 3, 24], [0, 2, 18]]) # compute confusion matrix with explicit label ordering labels = ['0', '2', '1'] if string_type else [0, 2, 1] cm = confusion_matrix(y_true, y_pred, labels=labels) assert_array_equal(cm, [[19, 1, 4], [0, 18, 2], [4, 24, 3]]) test(y_true, y_pred) test(list(str(y) for y in y_true), list(str(y) for y in y_pred), string_type=True) def test_confusion_matrix_multiclass_subset_labels(): """Test confusion matrix - multi-class case with subset of labels""" y_true, y_pred, _ = make_prediction(binary=False) # compute confusion matrix with only first two labels considered cm = confusion_matrix(y_true, y_pred, labels=[0, 1]) assert_array_equal(cm, [[19, 4], [4, 3]]) # compute confusion matrix with explicit label ordering for only subset # of labels cm = confusion_matrix(y_true, y_pred, labels=[2, 1]) assert_array_equal(cm, [[18, 2], [24, 3]]) def test_classification_report_multiclass(): """Test performance report""" iris = datasets.load_iris() y_true, y_pred, _ = make_prediction(dataset=iris, binary=False) # print classification report with class names expected_report = """\ precision recall f1-score support setosa 0.83 0.79 0.81 24 versicolor 0.33 0.10 0.15 31 virginica 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report( y_true, y_pred, labels=np.arange(len(iris.target_names)), target_names=iris.target_names) assert_equal(report, expected_report) # print classification report with label detection expected_report = """\ precision recall f1-score support 0 0.83 0.79 0.81 24 1 0.33 0.10 0.15 31 2 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_classification_report_multiclass_with_string_label(): y_true, y_pred, _ = make_prediction(binary=False) y_true = np.array(["blue", "green", "red"])[y_true] y_pred = np.array(["blue", "green", "red"])[y_pred] expected_report = """\ precision recall f1-score support blue 0.83 0.79 0.81 24 green 0.33 0.10 0.15 31 red 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred) assert_equal(report, expected_report) expected_report = """\ precision recall f1-score support a 0.83 0.79 0.81 24 b 0.33 0.10 0.15 31 c 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ report = classification_report(y_true, y_pred, target_names=["a", "b", "c"]) assert_equal(report, expected_report) def test_classification_report_multiclass_with_unicode_label(): y_true, y_pred, _ = make_prediction(binary=False) labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"]) y_true = labels[y_true] y_pred = labels[y_pred] 
expected_report = u"""\ precision recall f1-score support blue\xa2 0.83 0.79 0.81 24 green\xa2 0.33 0.10 0.15 31 red\xa2 0.42 0.90 0.57 20 avg / total 0.51 0.53 0.47 75 """ if np_version[:3] < (1, 7, 0): expected_message = ("NumPy < 1.7.0 does not implement" " searchsorted on unicode data correctly.") assert_raise_message(RuntimeError, expected_message, classification_report, y_true, y_pred) else: report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_multilabel_classification_report(): n_classes = 4 n_samples = 50 # using sequence of sequences is deprecated, but still tested make_ml = ignore_warnings(make_multilabel_classification) _, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0, n_samples=n_samples) _, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1, n_samples=n_samples) expected_report = """\ precision recall f1-score support 0 0.39 0.73 0.51 15 1 0.57 0.75 0.65 28 2 0.33 0.11 0.17 18 3 0.44 0.50 0.47 24 avg / total 0.45 0.54 0.47 85 """ lb = MultiLabelBinarizer() lb.fit([range(4)]) y_true_bi = lb.transform(y_true_ll) y_pred_bi = lb.transform(y_pred_ll) for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]: report = classification_report(y_true, y_pred) assert_equal(report, expected_report) def test_precision_recall_curve(): y_true, _, probas_pred = make_prediction(binary=True) _test_precision_recall_curve(y_true, probas_pred) # Use {-1, 1} for labels; make sure original labels aren't modified y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() _test_precision_recall_curve(y_true, probas_pred) assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] p, r, t = precision_recall_curve(labels, predict_probas) assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.])) assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.])) assert_array_almost_equal(t, np.array([1, 2, 3, 4])) assert_equal(p.size, r.size) assert_equal(p.size, t.size + 1) def test_precision_recall_curve_pos_label(): y_true, _, probas_pred = make_prediction(binary=False) pos_label = 2 p, r, thresholds = precision_recall_curve(y_true, probas_pred[:, pos_label], pos_label=pos_label) p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label, probas_pred[:, pos_label]) assert_array_almost_equal(p, p2) assert_array_almost_equal(r, r2) assert_array_almost_equal(thresholds, thresholds2) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def _test_precision_recall_curve(y_true, probas_pred): """Test Precision-Recall and aread under PR curve""" p, r, thresholds = precision_recall_curve(y_true, probas_pred) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.85, 2) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, probas_pred)) assert_almost_equal(_average_precision(y_true, probas_pred), precision_recall_auc, 1) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) # Smoke test in the case of proba having only one value p, r, thresholds = precision_recall_curve(y_true, np.zeros_like(probas_pred)) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.75, 3) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def test_precision_recall_curve_errors(): # Contains non-binary labels assert_raises(ValueError, precision_recall_curve, [0, 1, 2], [[0.0], [1.0], [1.0]]) def test_precision_recall_curve_toydata(): with 
np.errstate(all="raise"): # Binary classification y_true = [0, 1] y_score = [0, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [0, 1] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 0., 1.]) assert_array_almost_equal(r, [1., 0., 0.]) assert_almost_equal(auc_prc, 0.25) y_true = [1, 0] y_score = [1, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1., 0]) assert_almost_equal(auc_prc, .75) y_true = [1, 0] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [1, 0] y_score = [0.5, 0.5] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1, 0.]) assert_almost_equal(auc_prc, .75) y_true = [0, 0] y_score = [0.25, 0.75] assert_raises(Exception, precision_recall_curve, y_true, y_score) assert_raises(Exception, average_precision_score, y_true, y_score) y_true = [1, 1] y_score = [0.25, 0.75] p, r, _ = precision_recall_curve(y_true, y_score) assert_almost_equal(average_precision_score(y_true, y_score), 1.) assert_array_almost_equal(p, [1., 1., 1.]) assert_array_almost_equal(r, [1, 0.5, 0.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 1.) 
y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.625) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.625) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.25) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.75) def test_score_scale_invariance(): # Test that average_precision_score and roc_auc_score are invariant by # the scaling or shifting of probabilities y_true, _, probas_pred = make_prediction(binary=True) roc_auc = roc_auc_score(y_true, probas_pred) roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred) roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10) assert_equal(roc_auc, roc_auc_scaled) assert_equal(roc_auc, roc_auc_shifted) f = ignore_warnings(auc_score) roc_auc = f(y_true, probas_pred) roc_auc_scaled = f(y_true, 100 * probas_pred) roc_auc_shifted = f(y_true, probas_pred - 10) assert_equal(roc_auc, roc_auc_scaled) assert_equal(roc_auc, roc_auc_shifted) pr_auc = average_precision_score(y_true, probas_pred) pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred) pr_auc_shifted = average_precision_score(y_true, probas_pred - 10) assert_equal(pr_auc, pr_auc_scaled) assert_equal(pr_auc, pr_auc_shifted) def test_losses(): """Test loss functions""" y_true, y_pred, _ = make_prediction(binary=True) n_samples = y_true.shape[0] n_classes = np.size(unique_labels(y_true)) # Classification # -------------- # Throw deprecated warning assert_almost_equal(zero_one_loss(y_true, y_pred), 11 / float(n_samples), 2) assert_equal(zero_one_loss(y_true, y_pred, normalize=False), 11) assert_almost_equal(zero_one_loss(y_true, y_true), 0.0, 2) assert_almost_equal(hamming_loss(y_true, y_pred), 2 * 11. / (n_samples * n_classes), 2) assert_equal(accuracy_score(y_true, y_pred), 1 - zero_one_loss(y_true, y_pred)) # Regression # ---------- assert_almost_equal(mean_squared_error(y_true, y_pred), 10.999 / n_samples, 2) assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2) # mean_absolute_error and mean_squared_error are equal because # it is a binary problem. 
assert_almost_equal(mean_absolute_error(y_true, y_pred), 10.999 / n_samples, 2) assert_almost_equal(mean_absolute_error(y_true, y_true), 0.00, 2) assert_almost_equal(explained_variance_score(y_true, y_pred), 0.16, 2) assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2) assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0) assert_almost_equal(r2_score(y_true, y_pred), 0.12, 2) assert_almost_equal(r2_score(y_true, y_true), 1.00, 2) assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0) assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0) def test_losses_at_limits(): # test limit cases assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2) assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2) assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2) assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2) def test_symmetry(): """Test the symmetry of score and loss functions""" y_true, y_pred, _ = make_prediction(binary=True) # We shouldn't forget any metrics assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS, METRIC_UNDEFINED_MULTICLASS), set(ALL_METRICS)) assert_equal( set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)), set([])) # Symmetric metric for name in SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_pred, y_true), err_msg="%s is not symmetric" % name) # Not symmetric metrics for name in NOT_SYMMETRIC_METRICS: metric = ALL_METRICS[name] assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)), msg="%s seems to be symmetric" % name) def test_sample_order_invariance(): y_true, y_pred, _ = make_prediction(binary=True) y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0) for name, metric in ALL_METRICS.items(): if name in METRIC_UNDEFINED_MULTICLASS: continue assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) def test_sample_order_invariance_multilabel_and_multioutput(): random_state = check_random_state(0) # Generate some data y_true = random_state.randint(0, 2, size=(20, 25)) y_pred = random_state.randint(0, 2, size=(20, 25)) y_score = random_state.normal(size=y_true.shape) y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true, y_pred, y_score, random_state=0) for name in MULTILABELS_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) for name in THRESHOLDED_MULTILABEL_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_almost_equal(metric(y_true, y_score), metric(y_true_shuffle, y_score_shuffle), err_msg="%s is not sample order invariant" % name) assert_almost_equal(metric(y_true, y_pred), metric(y_true_shuffle, y_pred_shuffle), err_msg="%s is not sample order invariant" % name) def test_format_invariance_with_1d_vectors(): y1, y2, _ = make_prediction(binary=True) y1_list = list(y1) y2_list = list(y2) y1_1d, y2_1d = np.array(y1), np.array(y2) assert_equal(y1_1d.ndim, 1) assert_equal(y2_1d.ndim, 1) y1_column = np.reshape(y1_1d, (-1, 1)) y2_column = np.reshape(y2_1d, (-1, 1)) y1_row = np.reshape(y1_1d, (1, -1)) y2_row = np.reshape(y2_1d, (1, -1)) for name, metric in ALL_METRICS.items(): if name in 
METRIC_UNDEFINED_MULTICLASS: continue measure = metric(y1, y2) assert_almost_equal(metric(y1_list, y2_list), measure, err_msg="%s is not representation invariant " "with list" % name) assert_almost_equal(metric(y1_1d, y2_1d), measure, err_msg="%s is not representation invariant " "with np-array-1d" % name) assert_almost_equal(metric(y1_column, y2_column), measure, err_msg="%s is not representation invariant " "with np-array-column" % name) # Mix format support assert_almost_equal(metric(y1_1d, y2_list), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_list, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and list" % name) assert_almost_equal(metric(y1_1d, y2_column), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_1d), measure, err_msg="%s is not representation invariant " "with mix np-array-1d and np-array-column" % name) assert_almost_equal(metric(y1_list, y2_column), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) assert_almost_equal(metric(y1_column, y2_list), measure, err_msg="%s is not representation invariant " "with mix list and np-array-column" % name) # These mix representations aren't allowed assert_raises(ValueError, metric, y1_1d, y2_row) assert_raises(ValueError, metric, y1_row, y2_1d) assert_raises(ValueError, metric, y1_list, y2_row) assert_raises(ValueError, metric, y1_row, y2_list) assert_raises(ValueError, metric, y1_column, y2_row) assert_raises(ValueError, metric, y1_row, y2_column) # NB: We do not test for y1_row, y2_row as these may be # interpreted as multilabel or multioutput data. 
        if (name not in (MULTIOUTPUT_METRICS +
                         THRESHOLDED_MULTILABEL_METRICS +
                         MULTILABELS_METRICS)):
            assert_raises(ValueError, metric, y1_row, y2_row)


def test_invariance_string_vs_numbers_labels():
    """Ensure that classification metrics give the same results with string
    labels as with numbers"""
    y1, y2, _ = make_prediction(binary=True)

    y1_str = np.array(["eggs", "spam"])[y1]
    y2_str = np.array(["eggs", "spam"])[y2]

    pos_label_str = "spam"
    labels_str = ["eggs", "spam"]

    for name, metric in CLASSIFICATION_METRICS.items():
        if name in METRIC_UNDEFINED_MULTICLASS:
            continue

        measure_with_number = metric(y1, y2)

        # Ugly, but handle the case with a pos_label and labels
        metric_str = metric
        if name in METRICS_WITH_POS_LABEL:
            metric_str = partial(metric_str, pos_label=pos_label_str)

        measure_with_str = metric_str(y1_str, y2_str)

        assert_array_equal(measure_with_number, measure_with_str,
                           err_msg="{0} failed string vs number invariance "
                                   "test".format(name))

        measure_with_strobj = metric_str(y1_str.astype('O'),
                                         y2_str.astype('O'))
        assert_array_equal(measure_with_number, measure_with_strobj,
                           err_msg="{0} failed string object vs number "
                                   "invariance test".format(name))

        if name in METRICS_WITH_LABELS:
            metric_str = partial(metric_str, labels=labels_str)
            measure_with_str = metric_str(y1_str, y2_str)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))

            measure_with_strobj = metric_str(y1_str.astype('O'),
                                             y2_str.astype('O'))
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))

    for name, metric in THRESHOLDED_METRICS.items():
        if name in ("log_loss", "hinge_loss"):
            measure_with_number = metric(y1, y2)
            measure_with_str = metric(y1_str, y2)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))

            measure_with_strobj = metric(y1_str.astype('O'), y2)
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string object vs number "
                                       "invariance test".format(name))
        else:
            # TODO: these metrics do not support string labels yet
            assert_raises(ValueError, metric, y1_str, y2)
            assert_raises(ValueError, metric, y1_str.astype('O'), y2)


@ignore_warnings
def check_single_sample(name):
    """Non-regression test: scores should work with a single sample.

    This is important for leave-one-out cross validation.
    Score functions tested are those that formerly called np.squeeze,
    which turns an array of size 1 into a 0-d array (!).
""" metric = ALL_METRICS[name] # assert that no exception is thrown for i, j in product([0, 1], repeat=2): metric([i], [j]) @ignore_warnings def check_single_sample_multioutput(name): metric = ALL_METRICS[name] for i, j, k, l in product([0, 1], repeat=4): metric(np.array([[i, j]]), np.array([[k, l]])) def test_single_sample(): for name in ALL_METRICS: if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS: # Those metrics are not always defined with one sample # or in multiclass classification continue yield check_single_sample, name for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS: yield check_single_sample_multioutput, name def test_hinge_loss_binary(): y_true = np.array([-1, 1, 1, -1]) pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4) y_true = np.array([0, 2, 2, 0]) pred_decision = np.array([-8.5, 0.5, 1.5, -0.3]) assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4) def test_multioutput_regression(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]) error = mean_squared_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) # mean_absolute_error and mean_squared_error are equal because # it is a binary problem. error = mean_absolute_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) error = r2_score(y_true, y_pred) assert_almost_equal(error, 1 - 5. / 2) def test_multioutput_number_of_output_differ(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0], [1, 0], [0, 0]]) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] assert_raises(ValueError, metric, y_true, y_pred) def test_multioutput_regression_invariance_to_dimension_shuffling(): # test invariance to dimension shuffling y_true, y_pred, _ = make_prediction() n_dims = 3 y_true = np.reshape(y_true, (-1, n_dims)) y_pred = np.reshape(y_pred, (-1, n_dims)) rng = check_random_state(314159) for name in MULTIOUTPUT_METRICS: metric = ALL_METRICS[name] error = metric(y_true, y_pred) for _ in xrange(3): perm = rng.permutation(n_dims) assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]), error, err_msg="%s is not dimension shuffling " "invariant" % name) def test_multilabel_representation_invariance(): # Generate some data n_classes = 4 n_samples = 50 # using sequence of sequences is deprecated, but still tested make_ml = ignore_warnings(make_multilabel_classification) _, y1 = make_ml(n_features=1, n_classes=n_classes, random_state=0, n_samples=n_samples) _, y2 = make_ml(n_features=1, n_classes=n_classes, random_state=1, n_samples=n_samples) # Be sure to have at least one empty label y1 += ([], ) y2 += ([], ) # NOTE: The "sorted" trick is necessary to shuffle labels, because it # allows to return the shuffled tuple. 
    rng = check_random_state(42)
    shuffled = lambda x: sorted(x, key=lambda *args: rng.rand())
    y1_shuffle = [shuffled(x) for x in y1]
    y2_shuffle = [shuffled(x) for x in y2]

    # Let's have redundant labels
    y1_redundant = [x * rng.randint(1, 4) for x in y1]
    y2_redundant = [x * rng.randint(1, 4) for x in y2]

    # Binary indicator matrix format
    lb = MultiLabelBinarizer().fit([range(n_classes)])
    y1_binary_indicator = lb.transform(y1)
    y2_binary_indicator = lb.transform(y2)

    y1_shuffle_binary_indicator = lb.transform(y1_shuffle)
    y2_shuffle_binary_indicator = lb.transform(y2_shuffle)

    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]

        # XXX cruel hack to work with partial functions
        if isinstance(metric, partial):
            metric.__module__ = 'tmp'
            metric.__name__ = name

        # Check warning for sequence of sequences
        measure = assert_warns(DeprecationWarning, metric, y1, y2)
        metric = ignore_warnings(metric)

        # Check representation invariance
        assert_almost_equal(metric(y1_binary_indicator, y2_binary_indicator),
                            measure,
                            err_msg="%s failed representation invariance "
                                    "between list of list of labels "
                                    "format and dense binary indicator "
                                    "format." % name)

        # Check invariance with redundant labels with list of labels
        assert_almost_equal(metric(y1, y2_redundant), measure,
                            err_msg="%s failed redundant label invariance"
                                    % name)

        assert_almost_equal(metric(y1_redundant, y2_redundant), measure,
                            err_msg="%s failed redundant label invariance"
                                    % name)

        assert_almost_equal(metric(y1_redundant, y2), measure,
                            err_msg="%s failed redundant label invariance"
                                    % name)

        # Check shuffling invariance with list of labels
        assert_almost_equal(metric(y1_shuffle, y2_shuffle), measure,
                            err_msg="%s failed shuffling invariance "
                                    "with list of list of labels format."
                                    % name)

        # Check shuffling invariance with dense binary indicator matrix
        assert_almost_equal(metric(y1_shuffle_binary_indicator,
                                   y2_shuffle_binary_indicator), measure,
                            err_msg="%s failed shuffling invariance "
                                    " with dense binary indicator format."
% name) # Check raises error with mix input representation assert_raises(ValueError, metric, y1, y2_binary_indicator) assert_raises(ValueError, metric, y1_binary_indicator, y2) def test_multilabel_zero_one_loss_subset(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) assert_equal(zero_one_loss(y1, y2), 0.5) assert_equal(zero_one_loss(y1, y1), 0) assert_equal(zero_one_loss(y2, y2), 0) assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1) assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1) assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1) assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1) # List of tuple of label y1 = [(1, 2,), (0, 2,)] y2 = [(2,), (0, 2,)] assert_equal(zero_one_loss(y1, y2), 0.5) assert_equal(zero_one_loss(y1, y1), 0) assert_equal(zero_one_loss(y2, y2), 0) assert_equal(zero_one_loss(y2, [(), ()]), 1) assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1) def test_multilabel_hamming_loss(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) assert_equal(hamming_loss(y1, y2), 1 / 6) assert_equal(hamming_loss(y1, y1), 0) assert_equal(hamming_loss(y2, y2), 0) assert_equal(hamming_loss(y2, np.logical_not(y2)), 1) assert_equal(hamming_loss(y1, np.logical_not(y1)), 1) assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6) assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5) # List of tuple of label y1 = [(1, 2,), (0, 2,)] y2 = [(2,), (0, 2,)] assert_equal(hamming_loss(y1, y2), 1 / 6) assert_equal(hamming_loss(y1, y1), 0) assert_equal(hamming_loss(y2, y2), 0) assert_equal(hamming_loss(y2, [(), ()]), 0.75) assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625) assert_almost_equal(hamming_loss(y2, [tuple(), (10, )], classes=np.arange(11)), 0.1818, 2) def test_multilabel_accuracy_score_subset_accuracy(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) assert_equal(accuracy_score(y1, y2), 0.5) assert_equal(accuracy_score(y1, y1), 1) assert_equal(accuracy_score(y2, y2), 1) assert_equal(accuracy_score(y2, np.logical_not(y2)), 0) assert_equal(accuracy_score(y1, np.logical_not(y1)), 0) assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0) assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0) # List of tuple of label y1 = [(1, 2,), (0, 2,)] y2 = [(2,), (0, 2,)] assert_equal(accuracy_score(y1, y2), 0.5) assert_equal(accuracy_score(y1, y1), 1) assert_equal(accuracy_score(y2, y2), 1) assert_equal(accuracy_score(y2, [(), ()]), 0) assert_equal(accuracy_score(y1, y2, normalize=False), 1) assert_equal(accuracy_score(y1, y1, normalize=False), 2) assert_equal(accuracy_score(y2, y2, normalize=False), 2) assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0) def test_multilabel_jaccard_similarity_score(): # Dense label indicator matrix format y1 = np.array([[0, 1, 1], [1, 0, 1]]) y2 = np.array([[0, 0, 1], [1, 0, 1]]) # size(y1 \inter y2) = [1, 2] # size(y1 \union y2) = [2, 2] assert_equal(jaccard_similarity_score(y1, y2), 0.75) assert_equal(jaccard_similarity_score(y1, y1), 1) assert_equal(jaccard_similarity_score(y2, y2), 1) assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0) assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0) assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0) assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0) # List of tuple of label y1 = [(1, 2,), (0, 2,)] y2 = [(2,), (0, 2,)] 
assert_equal(jaccard_similarity_score(y1, y2), 0.75) assert_equal(jaccard_similarity_score(y1, y1), 1) assert_equal(jaccard_similarity_score(y2, y2), 1) assert_equal(jaccard_similarity_score(y2, [(), ()]), 0) # |y3 inter y4 | = [0, 1, 1] # |y3 union y4 | = [2, 1, 3] y3 = [(0,), (1,), (3,)] y4 = [(4,), (4,), (5, 6)] assert_almost_equal(jaccard_similarity_score(y3, y4), 0) # |y5 inter y6 | = [0, 1, 1] # |y5 union y6 | = [2, 1, 3] y5 = [(0,), (1,), (2, 3)] y6 = [(1,), (1,), (2, 0)] assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3) def test_normalize_option_binary_classification(): # Test in the binary case y_true, y_pred, _ = make_prediction(binary=True) n_samples = y_true.shape[0] for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multiclasss_classification(): # Test in the multiclass case y_true, y_pred, _ = make_prediction(binary=False) n_samples = y_true.shape[0] for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] measure = metrics(y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure) def test_normalize_option_multilabel_classification(): # Test in the multilabel case n_classes = 4 n_samples = 100 # using sequence of sequences is deprecated, but still tested make_ml = ignore_warnings(make_multilabel_classification) _, y_true = make_ml(n_features=1, n_classes=n_classes, random_state=0, n_samples=n_samples) _, y_pred = make_ml(n_features=1, n_classes=n_classes, random_state=1, n_samples=n_samples) # Be sure to have at least one empty label y_true += ([], ) y_pred += ([], ) n_samples += 1 lb = MultiLabelBinarizer().fit([range(n_classes)]) y_true_binary_indicator = lb.transform(y_true) y_pred_binary_indicator = lb.transform(y_pred) for name in METRICS_WITH_NORMALIZE_OPTION: metrics = ALL_METRICS[name] # List of list of labels measure = assert_warns(DeprecationWarning, metrics, y_true, y_pred, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true, y_pred, normalize=False) / n_samples, measure, err_msg="Failed with %s" % name) # Indicator matrix format measure = metrics(y_true_binary_indicator, y_pred_binary_indicator, normalize=True) assert_greater(measure, 0, msg="We failed to test correctly the normalize option") assert_almost_equal(metrics(y_true_binary_indicator, y_pred_binary_indicator, normalize=False) / n_samples, measure, err_msg="Failed with %s" % name) @ignore_warnings def test_precision_recall_f1_score_multilabel_1(): """ Test precision_recall_f1_score on a crafted multilabel example """ # First crafted example y_true_ll = [(0,), (1,), (2, 3)] y_pred_ll = [(1,), (1,), (2, 0)] lb = LabelBinarizer() lb.fit([range(4)]) y_true_bi = lb.transform(y_true_ll) y_pred_bi = lb.transform(y_pred_ll) for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]: p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) #tp = [0, 1, 1, 0] #fn = [1, 0, 0, 1] #fp = [1, 1, 0, 0] # Check per class assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2) 
assert_array_almost_equal(s, [1, 1, 1, 1], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2) # Check macro p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 1.5 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2.5 / 1.5 * 0.25) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) # Check micro p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 0.5) assert_almost_equal(r, 0.5) assert_almost_equal(f, 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) # Check weigted p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") assert_almost_equal(p, 1.5 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2.5 / 1.5 * 0.25) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) # Check weigted # |h(x_i) inter y_i | = [0, 1, 1] # |y_i| = [1, 1, 2] # |h(x_i)| = [1, 1, 2] p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") assert_almost_equal(p, 0.5) assert_almost_equal(r, 0.5) assert_almost_equal(f, 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.5) @ignore_warnings def test_precision_recall_f1_score_multilabel_2(): """ Test precision_recall_f1_score on a crafted multilabel example 2 """ # Second crafted example y_true_ll = [(1,), (2,), (2, 3)] y_pred_ll = [(4,), (4,), (2, 1)] lb = LabelBinarizer() lb.fit([range(1, 5)]) y_true_bi = lb.transform(y_true_ll) y_pred_bi = lb.transform(y_pred_ll) for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]: # tp = [ 0. 1. 0. 0.] # fp = [ 1. 0. 0. 2.] # fn = [ 1. 1. 1. 0.] 
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2) assert_array_almost_equal(s, [1, 2, 1, 0], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 0.25) assert_almost_equal(r, 0.25) assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 0.25) assert_almost_equal(r, 0.125) assert_almost_equal(f, 2 / 12) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") assert_almost_equal(p, 2 / 4) assert_almost_equal(r, 1 / 4) assert_almost_equal(f, 2 / 3 * 2 / 4) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") # Check weigted # |h(x_i) inter y_i | = [0, 0, 1] # |y_i| = [1, 1, 2] # |h(x_i)| = [1, 1, 2] assert_almost_equal(p, 1 / 6) assert_almost_equal(r, 1 / 6) assert_almost_equal(f, 2 / 4 * 1 / 3) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.1666, 2) @ignore_warnings def test_precision_recall_f1_score_with_an_empty_prediction(): y_true_ll = [(1,), (0,), (2, 1,)] y_pred_ll = [tuple(), (3,), (2, 1)] lb = LabelBinarizer() lb.fit([range(4)]) y_true_bi = lb.transform(y_true_ll) y_pred_bi = lb.transform(y_pred_ll) for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]: # true_pos = [ 0. 1. 1. 0.] # false_pos = [ 0. 0. 0. 1.] # false_neg = [ 1. 1. 0. 0.] 
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None) assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2) assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2) assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2) assert_array_almost_equal(s, [1, 2, 1, 0], 2) f2 = fbeta_score(y_true, y_pred, beta=2, average=None) support = s assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="macro") assert_almost_equal(p, 0.5) assert_almost_equal(r, 1.5 / 4) assert_almost_equal(f, 2.5 / (4 * 1.5)) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"), np.mean(f2)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="micro") assert_almost_equal(p, 2 / 3) assert_almost_equal(r, 0.5) assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5)) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="micro"), (1 + 4) * p * r / (4 * p + r)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="weighted") assert_almost_equal(p, 3 / 4) assert_almost_equal(r, 0.5) assert_almost_equal(f, (2 / 1.5 + 1) / 4) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="weighted"), np.average(f2, weights=support)) p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average="samples") # |h(x_i) inter y_i | = [0, 0, 2] # |y_i| = [1, 1, 2] # |h(x_i)| = [0, 1, 2] assert_almost_equal(p, 1 / 3) assert_almost_equal(r, 1 / 3) assert_almost_equal(f, 1 / 3) assert_equal(s, None) assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"), 0.333, 2) def test_precision_recall_f1_no_labels(): y_true = np.zeros((20, 3)) y_pred = np.zeros_like(y_true) # tp = [0, 0, 0] # fn = [0, 0, 0] # fp = [0, 0, 0] # support = [0, 0, 0] # |y_hat_i inter y_i | = [0, 0, 0] # |y_i| = [0, 0, 0] # |y_hat_i| = [0, 0, 0] for beta in [1]: p, r, f, s = assert_warns(UndefinedMetricWarning, precision_recall_fscore_support, y_true, y_pred, average=None, beta=beta) assert_array_almost_equal(p, [0, 0, 0], 2) assert_array_almost_equal(r, [0, 0, 0], 2) assert_array_almost_equal(f, [0, 0, 0], 2) assert_array_almost_equal(s, [0, 0, 0], 2) fbeta = assert_warns(UndefinedMetricWarning, fbeta_score, y_true, y_pred, beta=beta, average=None) assert_array_almost_equal(fbeta, [0, 0, 0], 2) for average in ["macro", "micro", "weighted", "samples"]: p, r, f, s = assert_warns(UndefinedMetricWarning, precision_recall_fscore_support, y_true, y_pred, average=average, beta=beta) assert_almost_equal(p, 0) assert_almost_equal(r, 0) assert_almost_equal(f, 0) assert_equal(s, None) fbeta = assert_warns(UndefinedMetricWarning, fbeta_score, y_true, y_pred, beta=beta, average=average) assert_almost_equal(fbeta, 0) def test_prf_warnings(): # average of per-label scores f, w = precision_recall_fscore_support, UndefinedMetricWarning my_assert = assert_warns_message for average in [None, 'weighted', 'macro']: msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 in labels with no predicted samples.') my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average) msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 in labels with no true samples.') my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average) # average of per-sample scores msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 in samples with no predicted labels.') my_assert(w, msg, f, np.array([[1, 0], [1, 0]]), np.array([[1, 0], [0, 0]]), average='samples') 
msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 in samples with no true labels.') my_assert(w, msg, f, np.array([[1, 0], [0, 0]]), np.array([[1, 0], [1, 0]]), average='samples') # single score: micro-average msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 due to no predicted samples.') my_assert(w, msg, f, np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 due to no true samples.') my_assert(w, msg, f, np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') # single postive label msg = ('Precision and F-score are ill-defined and ' 'being set to 0.0 due to no predicted samples.') my_assert(w, msg, f, [1, 1], [-1, -1], average='macro') msg = ('Recall and F-score are ill-defined and ' 'being set to 0.0 due to no true samples.') my_assert(w, msg, f, [-1, -1], [1, 1], average='macro') def test_recall_warnings(): assert_no_warnings(recall_score, np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') recall_score(np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') assert_equal(str(record.pop().message), 'Recall is ill-defined and ' 'being set to 0.0 due to no true samples.') def test_precision_warnings(): with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') precision_score(np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') assert_equal(str(record.pop().message), 'Precision is ill-defined and ' 'being set to 0.0 due to no predicted samples.') assert_no_warnings(precision_score, np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') def test_fscore_warnings(): with warnings.catch_warnings(record=True) as record: warnings.simplefilter('always') for score in [f1_score, partial(fbeta_score, beta=2)]: score(np.array([[1, 1], [1, 1]]), np.array([[0, 0], [0, 0]]), average='micro') assert_equal(str(record.pop().message), 'F-score is ill-defined and ' 'being set to 0.0 due to no predicted samples.') score(np.array([[0, 0], [0, 0]]), np.array([[1, 1], [1, 1]]), average='micro') assert_equal(str(record.pop().message), 'F-score is ill-defined and ' 'being set to 0.0 due to no true samples.') def test__check_clf_targets(): """Check that _check_clf_targets correctly merges target types, squeezes output and fails if input lengths differ.""" IND = 'multilabel-indicator' SEQ = 'multilabel-sequences' MC = 'multiclass' BIN = 'binary' CNT = 'continuous' MMC = 'multiclass-multioutput' MCN = 'continuous-multioutput' # all of length 3 EXAMPLES = [ (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])), # must not be considered binary (IND, np.array([[0, 1], [1, 0], [1, 1]])), (SEQ, [[2, 3], [1], [3]]), (MC, [2, 3, 1]), (BIN, [0, 1, 1]), (CNT, [0., 1.5, 1.]), (MC, np.array([[2], [3], [1]])), (BIN, np.array([[0], [1], [1]])), (CNT, np.array([[0.], [1.5], [1.]])), (MMC, np.array([[0, 2], [1, 3], [2, 3]])), (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])), ] # expected type given input types, or None for error # (types will be tried in either order) EXPECTED = { (IND, IND): IND, (SEQ, SEQ): SEQ, (MC, MC): MC, (BIN, BIN): BIN, (IND, SEQ): None, (MC, SEQ): None, (BIN, SEQ): None, (MC, IND): None, (BIN, IND): None, (BIN, MC): MC, # Disallowed types (CNT, CNT): None, (MMC, MMC): None, (MCN, MCN): None, (IND, CNT): None, (SEQ, CNT): None, (MC, CNT): None, (BIN, CNT): None, (MMC, CNT): None, 
(MCN, CNT): None, (IND, MMC): None, (SEQ, MMC): None, (MC, MMC): None, (BIN, MMC): None, (MCN, MMC): None, (IND, MCN): None, (SEQ, MCN): None, (MC, MCN): None, (BIN, MCN): None, } for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2): try: expected = EXPECTED[type1, type2] except KeyError: expected = EXPECTED[type2, type1] if expected is None: assert_raises(ValueError, _check_clf_targets, y1, y2) if type1 != type2: assert_raise_message( ValueError, "Can't handle mix of {0} and {1}".format(type1, type2), _check_clf_targets, y1, y2) else: if type1 not in (BIN, MC, SEQ, IND): assert_raise_message(ValueError, "{0} is not supported".format(type1), _check_clf_targets, y1, y2) else: merged_type, y1out, y2out = _check_clf_targets(y1, y2) assert_equal(merged_type, expected) if not merged_type.startswith('multilabel'): assert_array_equal(y1out, np.squeeze(y1)) assert_array_equal(y2out, np.squeeze(y2)) assert_raises(ValueError, _check_clf_targets, y1[:-1], y2) def test__check_reg_targets(): # All of length 3 EXAMPLES = [ ("continuous", [1, 2, 3], 1), ("continuous", [[1], [2], [3]], 1), ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2), ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2), ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3), ] for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2): if type1 == type2 and n_out1 == n_out2: y_type, y_check1, y_check2 = _check_reg_targets(y1, y2) assert_equal(type1, y_type) if type1 == 'continuous': assert_array_equal(y_check1, np.reshape(y1, (-1, 1))) assert_array_equal(y_check2, np.reshape(y2, (-1, 1))) else: assert_array_equal(y_check1, y1) assert_array_equal(y_check2, y2) else: assert_raises(ValueError, _check_reg_targets, y1, y2) def test_log_loss(): # binary case with symbolic labels ("no" < "yes") y_true = ["no", "no", "no", "yes", "yes", "yes"] y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99], [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]]) loss = log_loss(y_true, y_pred) assert_almost_equal(loss, 1.8817971) # multiclass case; adapted from http://bit.ly/RJJHWA y_true = [1, 0, 2] y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]] loss = log_loss(y_true, y_pred, normalize=True) assert_almost_equal(loss, 0.6904911) # check that we got all the shapes and axes right # by doubling the length of y_true and y_pred y_true *= 2 y_pred *= 2 loss = log_loss(y_true, y_pred, normalize=False) assert_almost_equal(loss, 0.6904911 * 6, decimal=6) # check eps and handling of absolute zero and one probabilities y_pred = np.asarray(y_pred) > .5 loss = log_loss(y_true, y_pred, normalize=True, eps=.1) assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9))) # raise error if number of classes are not equal. 
y_true = [1, 0, 2] y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]] assert_raises(ValueError, log_loss, y_true, y_pred) # case when y_true is a string array object y_true = ["ham", "spam", "spam", "ham"] y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]] loss = log_loss(y_true, y_pred) assert_almost_equal(loss, 1.0383217, decimal=6) @ignore_warnings def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel): n_samples, n_classes = y_true_binarize.shape # No averaging label_measure = metric(y_true, y_pred, average=None) assert_array_almost_equal(label_measure, [metric(y_true_binarize[:, i], y_pred_binarize[:, i]) for i in range(n_classes)]) # Micro measure micro_measure = metric(y_true, y_pred, average="micro") assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(), y_pred_binarize.ravel())) # Macro measure macro_measure = metric(y_true, y_pred, average="macro") assert_almost_equal(macro_measure, np.mean(label_measure)) # Weighted measure weights = np.sum(y_true_binarize, axis=0, dtype=int) if np.sum(weights) != 0: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, np.average(label_measure, weights=weights)) else: weighted_measure = metric(y_true, y_pred, average="weighted") assert_almost_equal(weighted_measure, 0) # Sample measure if is_multilabel: sample_measure = metric(y_true, y_pred, average="samples") assert_almost_equal(sample_measure, np.mean([metric(y_true_binarize[i], y_pred_binarize[i]) for i in range(n_samples)])) assert_raises(ValueError, metric, y_true, y_pred, average="unknown") assert_raises(ValueError, metric, y_true, y_pred, average="garbage") def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score): is_multilabel = type_of_target(y_true).startswith("multilabel") metric = ALL_METRICS[name] if name in METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel) elif name in THRESHOLDED_METRICS_WITH_AVERAGING: _check_averaging(metric, y_true, y_score, y_true_binarize, y_score, is_multilabel) else: raise ValueError("Metric is not recorded as having an average option") def test_averaging_multiclass(): y_true, y_pred, y_score = make_prediction(binary=False) lb = LabelBinarizer().fit(y_true) y_true_binarize = lb.transform(y_true) y_pred_binarize = lb.transform(y_pred) for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel(): n_classes = 5 n_samples = 40 _, y = make_multilabel_classification(n_features=1, n_classes=n_classes, random_state=5, n_samples=n_samples, return_indicator=True, allow_unlabeled=False) y_true = y[:20] y_pred = y[20:] y_score = check_random_state(0).normal(size=(20, n_classes)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) def test_averaging_multilabel_all_zeroes(): y_true = np.zeros((20, 3)) y_pred = np.zeros((20, 3)) y_score = np.zeros((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) # Test _average_binary_score for weight.sum() == 0 binary_metric = (lambda y_true, y_score, average="macro": _average_binary_score( precision_score, y_true, y_score, average)) _check_averaging(binary_metric, 
y_true, y_pred, y_true_binarize, y_pred_binarize, is_multilabel=True) def test_averaging_multilabel_all_ones(): y_true = np.ones((20, 3)) y_pred = np.ones((20, 3)) y_score = np.ones((20, 3)) y_true_binarize = y_true y_pred_binarize = y_pred for name in METRICS_WITH_AVERAGING: yield (check_averaging, name, y_true, y_true_binarize, y_pred, y_pred_binarize, y_score) @ignore_warnings def check_sample_weight_invariance(name, metric, y1, y2): rng = np.random.RandomState(0) sample_weight = rng.randint(1, 10, size=len(y1)) # check that unit weights gives the same score as no weight unweighted_score = metric(y1, y2, sample_weight=None) assert_equal( unweighted_score, metric(y1, y2, sample_weight=np.ones(shape=len(y1))), msg="For %s sample_weight=None is not equivalent to " "sample_weight=ones" % name) # check that the weighted and unweighted scores are unequal weighted_score = metric(y1, y2, sample_weight=sample_weight) assert_not_equal( unweighted_score, weighted_score, msg="Unweighted and weighted scores are unexpectedly " "equal (%f) for %s" % (weighted_score, name)) # check that sample_weight can be a list weighted_score_list = metric(y1, y2, sample_weight=sample_weight.tolist()) assert_equal( weighted_score, weighted_score_list, msg="Weighted scores for array and list sample_weight input are " "not equal (%f != %f) for %s" % ( weighted_score, weighted_score_list, name)) # check that integer weights is the same as repeated samples repeat_weighted_score = metric( np.repeat(y1, sample_weight, axis=0), np.repeat(y2, sample_weight, axis=0), sample_weight=None) assert_almost_equal( weighted_score, repeat_weighted_score, err_msg="Weighting %s is not equal to repeating samples" % name) # check that ignoring a fraction of the samples is equivalent to setting # the corresponding weights to zero sample_weight_subset = sample_weight[1::2] sample_weight_zeroed = np.copy(sample_weight) sample_weight_zeroed[::2] = 0 y1_subset = y1[1::2] y2_subset = y2[1::2] weighted_score_subset = metric(y1_subset, y2_subset, sample_weight=sample_weight_subset) weighted_score_zeroed = metric(y1, y2, sample_weight=sample_weight_zeroed) assert_almost_equal( weighted_score_subset, weighted_score_zeroed, err_msg="Zeroing weights does not give the same result as " "removing the corresponding samples (%f != %f) for %s" % ( weighted_score_zeroed, weighted_score_subset, name)) if not name.startswith('unnormalized'): # check that the score is invariant under scaling of the weights by a # common factor for scaling in [2, 0.3]: assert_almost_equal( weighted_score, metric(y1, y2, sample_weight=sample_weight * scaling), err_msg="%s sample_weight is not invariant " "under scaling" % name) def test_sample_weight_invariance(): # binary output y1, y2, _ = make_prediction(binary=True) for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_MULTICLASS): continue metric = ALL_METRICS[name] yield check_sample_weight_invariance, name, metric, y1, y2 # multiclass y1, y2, _ = make_prediction() for name in ALL_METRICS: if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or name in METRIC_UNDEFINED_MULTICLASS): continue metric = ALL_METRICS[name] yield check_sample_weight_invariance, name, metric, y1, y2 # multilabel sequence _, ya = make_multilabel_classification( n_features=1, n_classes=3, random_state=0, n_samples=10) _, yb = make_multilabel_classification( n_features=1, n_classes=3, random_state=1, n_samples=10) y1 = ya + yb y2 = ya + ya for name in MULTILABELS_METRICS: if name in METRICS_WITHOUT_SAMPLE_WEIGHT: 
continue metric = ALL_METRICS[name] yield (check_sample_weight_invariance, name, metric, y1, y2) # multilabel indicator _, ya = make_multilabel_classification( n_features=1, n_classes=6, random_state=0, n_samples=10, return_indicator=True) _, yb = make_multilabel_classification( n_features=1, n_classes=6, random_state=1, n_samples=10, return_indicator=True) y1 = np.vstack([ya, yb]) y2 = np.vstack([ya, ya]) for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS + MULTIOUTPUT_METRICS): if name in METRICS_WITHOUT_SAMPLE_WEIGHT: continue metric = ALL_METRICS[name] yield (check_sample_weight_invariance, name, metric, y1, y2)
apache-2.0
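The tests above pin down how the `average` modes of `precision_recall_fscore_support` behave, including the ill-defined cases that raise `UndefinedMetricWarning`. A standalone sketch of the same API (not part of the dataset row above; the toy labels are invented) shows how a class that is never predicted drives its precision to 0 and triggers the warning, and how support comes back as None once an average is requested:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# Toy multiclass labels: class 0 is never predicted, so its precision is ill-defined.
y_true = np.array([0, 1, 1, 2, 2, 2])
y_pred = np.array([1, 1, 1, 2, 2, 1])

for average in (None, "macro", "micro", "weighted"):
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=average)
    print(average, p, r, f, s)  # s is None whenever an average is requested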
khkaminska/scikit-learn
benchmarks/bench_plot_ward.py
290
1260
""" Benchmark scikit-learn's Ward implement compared to SciPy's """ import time import numpy as np from scipy.cluster import hierarchy import pylab as pl from sklearn.cluster import AgglomerativeClustering ward = AgglomerativeClustering(n_clusters=3, linkage='ward') n_samples = np.logspace(.5, 3, 9) n_features = np.logspace(1, 3.5, 7) N_samples, N_features = np.meshgrid(n_samples, n_features) scikits_time = np.zeros(N_samples.shape) scipy_time = np.zeros(N_samples.shape) for i, n in enumerate(n_samples): for j, p in enumerate(n_features): X = np.random.normal(size=(n, p)) t0 = time.time() ward.fit(X) scikits_time[j, i] = time.time() - t0 t0 = time.time() hierarchy.ward(X) scipy_time[j, i] = time.time() - t0 ratio = scikits_time / scipy_time pl.figure("scikit-learn Ward's method benchmark results") pl.imshow(np.log(ratio), aspect='auto', origin="lower") pl.colorbar() pl.contour(ratio, levels=[1, ], colors='k') pl.yticks(range(len(n_features)), n_features.astype(np.int)) pl.ylabel('N features') pl.xticks(range(len(n_samples)), n_samples.astype(np.int)) pl.xlabel('N samples') pl.title("Scikit's time, in units of scipy time (log)") pl.show()
bsd-3-clause
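The benchmark above sweeps a grid of sample and feature counts; a single-point version of the same comparison is enough to see the ratio it plots (an illustrative sketch with an arbitrary 1000 x 50 Gaussian matrix; absolute timings depend on the machine):

import time
import numpy as np
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering

X = np.random.normal(size=(1000, 50))

t0 = time.time()
AgglomerativeClustering(n_clusters=3, linkage='ward').fit(X)
sklearn_seconds = time.time() - t0

t0 = time.time()
hierarchy.ward(X)  # SciPy builds the full linkage tree
scipy_seconds = time.time() - t0

print("scikit-learn / SciPy time ratio: %.2f" % (sklearn_seconds / scipy_seconds))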
GBTAmmoniaSurvey/GAS
setup.py
3
4248
#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import glob import os import sys import ah_bootstrap from setuptools import setup #A dirty hack to get around some early import/configurations ambiguities if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins builtins._ASTROPY_SETUP_ = True from astropy_helpers.setup_helpers import ( register_commands, adjust_compiler, get_debug_option, get_package_info) from astropy_helpers.git_helpers import get_git_devstr from astropy_helpers.version_helpers import generate_version_py # Get some values from the setup.cfg #from distutils import config #conf = config.RawConfigParser() try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser conf = ConfigParser() conf.read(['setup.cfg']) metadata = dict(conf.items('metadata')) PACKAGENAME = metadata.get('package_name', 'packagename') DESCRIPTION = metadata.get('description', 'Astropy affiliated package') AUTHOR = metadata.get('author', '') AUTHOR_EMAIL = metadata.get('author_email', '') LICENSE = metadata.get('license', 'unknown') URL = metadata.get('url', 'http://astropy.org') # Get the long description from the package's docstring __import__(PACKAGENAME) package = sys.modules[PACKAGENAME] LONG_DESCRIPTION = package.__doc__ # Store the package name in a built-in variable so it's easy # to get from other parts of the setup infrastructure builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) VERSION = '0.5.dev' # Indicates if this version is a release version RELEASE = 'dev' not in VERSION if not RELEASE: VERSION += get_git_devstr(False) # Populate the dict of setup command overrides; this should be done before # invoking any other functionality from distutils since it can potentially # modify distutils' behavior. cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) # Adjust the compiler in case the default on this platform is to use a # broken one. adjust_compiler(PACKAGENAME) # Freeze build information in version.py generate_version_py(PACKAGENAME, VERSION, RELEASE, get_debug_option(PACKAGENAME)) # Treat everything in scripts except README.rst as a script to be installed scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) if os.path.basename(fname) != 'README.rst'] # Get configuration information from all of the various subpackages. # See the docstring for setup_helpers.update_package_files for more # details. package_info = get_package_info() # Add the project-global data package_info['package_data'].setdefault(PACKAGENAME, []) package_info['package_data'][PACKAGENAME].append('data/*') # Define entry points for command-line scripts entry_points = {} entry_points['console_scripts'] = [ 'astropy-package-template-example = packagename.example_mod:main', ] # Include all .c files, recursively, including those generated by # Cython, since we can not do this in MANIFEST.in with a "dynamic" # directory name. c_files = [] for root, dirs, files in os.walk(PACKAGENAME): for filename in files: if filename.endswith('.c'): c_files.append( os.path.join( os.path.relpath(root, PACKAGENAME), filename)) package_info['package_data'][PACKAGENAME].extend(c_files) # Note that requires and provides should not be included in the call to # ``setup``, since these are now deprecated. 
See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
# provides=[PACKAGENAME],

setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      dependency_links=['https://github.com/low-sky/gbtpipe/tarball/master#egg=gbtpipe-0.1.2'],
      install_requires=['astropy>=1.1', 'matplotlib>=1.5.1',
                        'scipy>=0.17.0', 'gbtpipe>0.1.1'],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=True,
      entry_points=entry_points,
      **package_info
      )
mit
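Most of the values passed to setup() in the script above come from a [metadata] section in setup.cfg; the reading pattern in isolation is just ConfigParser plus dict defaults (a minimal sketch, assuming a setup.cfg with a [metadata] section sits next to the script):

try:
    from ConfigParser import ConfigParser  # Python 2
except ImportError:
    from configparser import ConfigParser  # Python 3

conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

# Fall back to defaults when a key is missing, as the setup script does.
package_name = metadata.get('package_name', 'packagename')
description = metadata.get('description', 'Astropy affiliated package')
print(package_name, description)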
hopgal/data-science-from-scratch
code/working_with_data.py
61
16549
from __future__ import division from collections import Counter, defaultdict from functools import partial from linear_algebra import shape, get_row, get_column, make_matrix, \ vector_mean, vector_sum, dot, magnitude, vector_subtract, scalar_multiply from statistics import correlation, standard_deviation, mean from probability import inverse_normal_cdf from gradient_descent import maximize_batch import math, random, csv import matplotlib.pyplot as plt import dateutil.parser def bucketize(point, bucket_size): """floor the point to the next lower multiple of bucket_size""" return bucket_size * math.floor(point / bucket_size) def make_histogram(points, bucket_size): """buckets the points and counts how many in each bucket""" return Counter(bucketize(point, bucket_size) for point in points) def plot_histogram(points, bucket_size, title=""): histogram = make_histogram(points, bucket_size) plt.bar(histogram.keys(), histogram.values(), width=bucket_size) plt.title(title) plt.show() def compare_two_distributions(): random.seed(0) uniform = [random.randrange(-100,101) for _ in range(200)] normal = [57 * inverse_normal_cdf(random.random()) for _ in range(200)] plot_histogram(uniform, 10, "Uniform Histogram") plot_histogram(normal, 10, "Normal Histogram") def random_normal(): """returns a random draw from a standard normal distribution""" return inverse_normal_cdf(random.random()) xs = [random_normal() for _ in range(1000)] ys1 = [ x + random_normal() / 2 for x in xs] ys2 = [-x + random_normal() / 2 for x in xs] def scatter(): plt.scatter(xs, ys1, marker='.', color='black', label='ys1') plt.scatter(xs, ys2, marker='.', color='gray', label='ys2') plt.xlabel('xs') plt.ylabel('ys') plt.legend(loc=9) plt.show() def correlation_matrix(data): """returns the num_columns x num_columns matrix whose (i, j)th entry is the correlation between columns i and j of data""" _, num_columns = shape(data) def matrix_entry(i, j): return correlation(get_column(data, i), get_column(data, j)) return make_matrix(num_columns, num_columns, matrix_entry) def make_scatterplot_matrix(): # first, generate some random data num_points = 100 def random_row(): row = [None, None, None, None] row[0] = random_normal() row[1] = -5 * row[0] + random_normal() row[2] = row[0] + row[1] + 5 * random_normal() row[3] = 6 if row[2] > -2 else 0 return row random.seed(0) data = [random_row() for _ in range(num_points)] # then plot it _, num_columns = shape(data) fig, ax = plt.subplots(num_columns, num_columns) for i in range(num_columns): for j in range(num_columns): # scatter column_j on the x-axis vs column_i on the y-axis if i != j: ax[i][j].scatter(get_column(data, j), get_column(data, i)) # unless i == j, in which case show the series name else: ax[i][j].annotate("series " + str(i), (0.5, 0.5), xycoords='axes fraction', ha="center", va="center") # then hide axis labels except left and bottom charts if i < num_columns - 1: ax[i][j].xaxis.set_visible(False) if j > 0: ax[i][j].yaxis.set_visible(False) # fix the bottom right and top left axis labels, which are wrong because # their charts only have text in them ax[-1][-1].set_xlim(ax[0][-1].get_xlim()) ax[0][0].set_ylim(ax[0][1].get_ylim()) plt.show() def parse_row(input_row, parsers): """given a list of parsers (some of which may be None) apply the appropriate one to each element of the input_row""" return [parser(value) if parser is not None else value for value, parser in zip(input_row, parsers)] def parse_rows_with(reader, parsers): """wrap a reader to apply the parsers to each of its rows""" 
for row in reader: yield parse_row(row, parsers) def try_or_none(f): """wraps f to return None if f raises an exception assumes f takes only one input""" def f_or_none(x): try: return f(x) except: return None return f_or_none def parse_row(input_row, parsers): return [try_or_none(parser)(value) if parser is not None else value for value, parser in zip(input_row, parsers)] def try_parse_field(field_name, value, parser_dict): """try to parse value using the appropriate function from parser_dict""" parser = parser_dict.get(field_name) # None if no such entry if parser is not None: return try_or_none(parser)(value) else: return value def parse_dict(input_dict, parser_dict): return { field_name : try_parse_field(field_name, value, parser_dict) for field_name, value in input_dict.iteritems() } # # # MANIPULATING DATA # # def picker(field_name): """returns a function that picks a field out of a dict""" return lambda row: row[field_name] def pluck(field_name, rows): """turn a list of dicts into the list of field_name values""" return map(picker(field_name), rows) def group_by(grouper, rows, value_transform=None): # key is output of grouper, value is list of rows grouped = defaultdict(list) for row in rows: grouped[grouper(row)].append(row) if value_transform is None: return grouped else: return { key : value_transform(rows) for key, rows in grouped.iteritems() } def percent_price_change(yesterday, today): return today["closing_price"] / yesterday["closing_price"] - 1 def day_over_day_changes(grouped_rows): # sort the rows by date ordered = sorted(grouped_rows, key=picker("date")) # zip with an offset to get pairs of consecutive days return [{ "symbol" : today["symbol"], "date" : today["date"], "change" : percent_price_change(yesterday, today) } for yesterday, today in zip(ordered, ordered[1:])] # # # RESCALING DATA # # def scale(data_matrix): num_rows, num_cols = shape(data_matrix) means = [mean(get_column(data_matrix,j)) for j in range(num_cols)] stdevs = [standard_deviation(get_column(data_matrix,j)) for j in range(num_cols)] return means, stdevs def rescale(data_matrix): """rescales the input data so that each column has mean 0 and standard deviation 1 ignores columns with no deviation""" means, stdevs = scale(data_matrix) def rescaled(i, j): if stdevs[j] > 0: return (data_matrix[i][j] - means[j]) / stdevs[j] else: return data_matrix[i][j] num_rows, num_cols = shape(data_matrix) return make_matrix(num_rows, num_cols, rescaled) # # DIMENSIONALITY REDUCTION # X = [ [20.9666776351559,-13.1138080189357], [22.7719907680008,-19.8890894944696], [25.6687103160153,-11.9956004517219], [18.0019794950564,-18.1989191165133], [21.3967402102156,-10.8893126308196], [0.443696899177716,-19.7221132386308], [29.9198322142127,-14.0958668502427], [19.0805843080126,-13.7888747608312], [16.4685063521314,-11.2612927034291], [21.4597664701884,-12.4740034586705], [3.87655283720532,-17.575162461771], [34.5713920556787,-10.705185165378], [13.3732115747722,-16.7270274494424], [20.7281704141919,-8.81165591556553], [24.839851437942,-12.1240962157419], [20.3019544741252,-12.8725060780898], [21.9021426929599,-17.3225432396452], [23.2285885715486,-12.2676568419045], [28.5749111681851,-13.2616470619453], [29.2957424128701,-14.6299928678996], [15.2495527798625,-18.4649714274207], [26.5567257400476,-9.19794350561966], [30.1934232346361,-12.6272709845971], [36.8267446011057,-7.25409849336718], [32.157416823084,-10.4729534347553], [5.85964365291694,-22.6573731626132], [25.7426190674693,-14.8055803854566], 
[16.237602636139,-16.5920595763719], [14.7408608850568,-20.0537715298403], [6.85907008242544,-18.3965586884781], [26.5918329233128,-8.92664811750842], [-11.2216019958228,-27.0519081982856], [8.93593745011035,-20.8261235122575], [24.4481258671796,-18.0324012215159], [2.82048515404903,-22.4208457598703], [30.8803004755948,-11.455358009593], [15.4586738236098,-11.1242825084309], [28.5332537090494,-14.7898744423126], [40.4830293441052,-2.41946428697183], [15.7563759125684,-13.5771266003795], [19.3635588851727,-20.6224770470434], [13.4212840786467,-19.0238227375766], [7.77570680426702,-16.6385739839089], [21.4865983854408,-15.290799330002], [12.6392705930724,-23.6433305964301], [12.4746151388128,-17.9720169566614], [23.4572410437998,-14.602080545086], [13.6878189833565,-18.9687408182414], [15.4077465943441,-14.5352487124086], [20.3356581548895,-10.0883159703702], [20.7093833689359,-12.6939091236766], [11.1032293684441,-14.1383848928755], [17.5048321498308,-9.2338593361801], [16.3303688220188,-15.1054735529158], [26.6929062710726,-13.306030567991], [34.4985678099711,-9.86199941278607], [39.1374291499406,-10.5621430853401], [21.9088956482146,-9.95198845621849], [22.2367457578087,-17.2200123442707], [10.0032784145577,-19.3557700653426], [14.045833906665,-15.871937521131], [15.5640911917607,-18.3396956121887], [24.4771926581586,-14.8715313479137], [26.533415556629,-14.693883922494], [12.8722580202544,-21.2750596021509], [24.4768291376862,-15.9592080959207], [18.2230748567433,-14.6541444069985], [4.1902148367447,-20.6144032528762], [12.4332594022086,-16.6079789231489], [20.5483758651873,-18.8512560786321], [17.8180560451358,-12.5451990696752], [11.0071081078049,-20.3938092335862], [8.30560561422449,-22.9503944138682], [33.9857852657284,-4.8371294974382], [17.4376502239652,-14.5095976075022], [29.0379635148943,-14.8461553663227], [29.1344666599319,-7.70862921632672], [32.9730697624544,-15.5839178785654], [13.4211493998212,-20.150199857584], [11.380538260355,-12.8619410359766], [28.672631499186,-8.51866271785711], [16.4296061111902,-23.3326051279759], [25.7168371582585,-13.8899296143829], [13.3185154732595,-17.8959160024249], [3.60832478605376,-25.4023343597712], [39.5445949652652,-11.466377647931], [25.1693484426101,-12.2752652925707], [25.2884257196471,-7.06710309184533], [6.77665715793125,-22.3947299635571], [20.1844223778907,-16.0427471125407], [25.5506805272535,-9.33856532270204], [25.1495682602477,-7.17350567090738], [15.6978431006492,-17.5979197162642], [37.42780451491,-10.843637288504], [22.974620174842,-10.6171162611686], [34.6327117468934,-9.26182440487384], [34.7042513789061,-6.9630753351114], [15.6563953929008,-17.2196961218915], [25.2049825789225,-14.1592086208169] ] def de_mean_matrix(A): """returns the result of subtracting from every value in A the mean value of its column. 
the resulting matrix has mean 0 in every column""" nr, nc = shape(A) column_means, _ = scale(A) return make_matrix(nr, nc, lambda i, j: A[i][j] - column_means[j]) def direction(w): mag = magnitude(w) return [w_i / mag for w_i in w] def directional_variance_i(x_i, w): """the variance of the row x_i in the direction w""" return dot(x_i, direction(w)) ** 2 def directional_variance(X, w): """the variance of the data in the direction w""" return sum(directional_variance_i(x_i, w) for x_i in X) def directional_variance_gradient_i(x_i, w): """the contribution of row x_i to the gradient of the direction-w variance""" projection_length = dot(x_i, direction(w)) return [2 * projection_length * x_ij for x_ij in x_i] def directional_variance_gradient(X, w): return vector_sum(directional_variance_gradient_i(x_i,w) for x_i in X) def first_principal_component(X): guess = [1 for _ in X[0]] unscaled_maximizer = maximize_batch( partial(directional_variance, X), # is now a function of w partial(directional_variance_gradient, X), # is now a function of w guess) return direction(unscaled_maximizer) def first_principal_component_sgd(X): guess = [1 for _ in X[0]] unscaled_maximizer = maximize_stochastic( lambda x, _, w: directional_variance_i(x, w), lambda x, _, w: directional_variance_gradient_i(x, w), X, [None for _ in X], guess) return direction(unscaled_maximizer) def project(v, w): """return the projection of v onto w""" coefficient = dot(v, w) return scalar_multiply(coefficient, w) def remove_projection_from_vector(v, w): """projects v onto w and subtracts the result from v""" return vector_subtract(v, project(v, w)) def remove_projection(X, w): """for each row of X projects the row onto w, and subtracts the result from the row""" return [remove_projection_from_vector(x_i, w) for x_i in X] def principal_component_analysis(X, num_components): components = [] for _ in range(num_components): component = first_principal_component(X) components.append(component) X = remove_projection(X, component) return components def transform_vector(v, components): return [dot(v, w) for w in components] def transform(X, components): return [transform_vector(x_i, components) for x_i in X] if __name__ == "__main__": print "correlation(xs, ys1)", correlation(xs, ys1) print "correlation(xs, ys2)", correlation(xs, ys2) # safe parsing data = [] with open("comma_delimited_stock_prices.csv", "rb") as f: reader = csv.reader(f) for line in parse_rows_with(reader, [dateutil.parser.parse, None, float]): data.append(line) for row in data: if any(x is None for x in row): print row print "stocks" with open("stocks.txt", "rb") as f: reader = csv.DictReader(f, delimiter="\t") data = [parse_dict(row, { 'date' : dateutil.parser.parse, 'closing_price' : float }) for row in reader] max_aapl_price = max(row["closing_price"] for row in data if row["symbol"] == "AAPL") print "max aapl price", max_aapl_price # group rows by symbol by_symbol = defaultdict(list) for row in data: by_symbol[row["symbol"]].append(row) # use a dict comprehension to find the max for each symbol max_price_by_symbol = { symbol : max(row["closing_price"] for row in grouped_rows) for symbol, grouped_rows in by_symbol.iteritems() } print "max price by symbol" print max_price_by_symbol # key is symbol, value is list of "change" dicts changes_by_symbol = group_by(picker("symbol"), data, day_over_day_changes) # collect all "change" dicts into one big list all_changes = [change for changes in changes_by_symbol.values() for change in changes] print "max change", max(all_changes, 
key=picker("change")) print "min change", min(all_changes, key=picker("change")) # to combine percent changes, we add 1 to each, multiply them, and subtract 1 # for instance, if we combine +10% and -20%, the overall change is # (1 + 10%) * (1 - 20%) - 1 = 1.1 * .8 - 1 = -12% def combine_pct_changes(pct_change1, pct_change2): return (1 + pct_change1) * (1 + pct_change2) - 1 def overall_change(changes): return reduce(combine_pct_changes, pluck("change", changes)) overall_change_by_month = group_by(lambda row: row['date'].month, all_changes, overall_change) print "overall change by month" print overall_change_by_month print "rescaling" data = [[1, 20, 2], [1, 30, 3], [1, 40, 4]] print "original: ", data print "scale: ", scale(data) print "rescaled: ", rescale(data) print print "PCA" Y = de_mean_matrix(X) components = principal_component_analysis(Y, 2) print "principal components", components print "first point", Y[0] print "first point transformed", transform_vector(Y[0], components)
unlicense
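The bucketize/make_histogram pair at the top of the file above depends only on the standard library; a self-contained sketch of the same idea (the data here is random and purely illustrative):

import math
import random
from collections import Counter

def bucketize(point, bucket_size):
    """Floor the point to the next lower multiple of bucket_size."""
    return bucket_size * math.floor(point / bucket_size)

random.seed(0)
points = [random.uniform(-100, 100) for _ in range(200)]
histogram = Counter(bucketize(p, 10) for p in points)
print(sorted(histogram.items()))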
Bleyddyn/malpi
exp/test_speed.py
1
7664
from time import time import datetime from optparse import OptionParser import numpy as np import matplotlib.pyplot as plt from scipy import ndimage from scipy import misc from malpi.cnn import * from malpi.lstm import * from malpi.data_utils import get_CIFAR10_data from malpi.solver import Solver from malpi.rnn_layers import * def plot_solver(solver): plt.subplot(2, 1, 1) plt.plot(solver.loss_history, 'o') plt.xlabel('iteration') plt.ylabel('loss') plt.subplot(2, 1, 2) plt.plot(solver.train_acc_history, '-o') plt.plot(solver.val_acc_history, '-o') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('accuracy') plt.show() def getCIFAR10(verbose=True): data = get_CIFAR10_data(num_training=9000) if verbose: for k, v in data.iteritems(): print '%s: ' % k, v.shape return data def log( message ): logFileName = "test1.log" fmt = '%Y-%m-%d-%H-%M-%S' datestr = datetime.datetime.now().strftime(fmt) with open(logFileName,'a') as outf: outf.write(datestr + ": " + message + "\n") def test1(): hp = { 'reg': 3.37091767808e-05, 'lr': 0.000182436504066, 'decay': 1.0 } #layers = ["conv-64", "Conv-64", "maxpool", "FC-1000", "fc-1000"] #layer_params = [{'stride':1, 'dropout':0.5}, {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2}, (), ()] name = "SimpleTest1" layers = ["conv-32", "Conv-32", "maxpool", "FC-1000", "fc-10"] layer_params = [{'filter_size':3}, {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2}, (), {'relu':False}] model = MalpiConvNet(layers, layer_params, reg=hp['reg'], dtype=np.float16) data = getCIFAR10() solver = Solver(model, data, num_epochs=1, batch_size=50, lr_decay=hp['decay'], update_rule='adam', optim_config={ 'learning_rate': hp['lr'], }, verbose=True, print_every=50) t_start = time() solver.train() model.save(name+".pickle") log( name + " train time (m): " + str(((time() - t_start) / 60.0)) ) log( name + " hyperparams: " + str(hp) ) def classify(data): model = load_malpi('SimpleTest1.pickle') scores = model.loss(data) print scores def testload(): model = load_malpi('SimpleTest1.pickle') data = getCIFAR10(verbose=False) solver = Solver(model, data) train_acc = solver.check_accuracy(data["X_train"], data["y_train"], num_samples=1000) val_acc = solver.check_accuracy(data["X_val"], data["y_val"]) print "train acc: %f; val_acc: %f" % (train_acc,val_acc) def hyperparameterSearch(): best_val_acc = 0.0 best_model = None best_solver = None #reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01] #lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2] #reguls = np.append([3.37091767808e-05],reguls) #lrs = np.append([0.000182436504066],lrs) variations = np.array([0.9,1.0,1.1]) reguls = np.array([3.37091767808e-05]) * variations lrs = np.array([0.000182436504066]) * variations features = [48] #[8, 16, 32, 48, 64] decays = [1.0] val_accs = [] hdims = 1000 """ Try: Conv-64, Conv-64, maxpool, conv-128, conv-128, maxpool, conv-256, conv-256, maxpool, conv-512, conv-512, maxpool, conv-512, conv-512, maxpool, FC-4096, FC-4096, FC-1000, softmax Dropout? 
""" for reg in reguls: for lr in lrs: for feat in features: for decay in decays: model = MultiLayerConvNet(num_filters=feat, filter_size=3, weight_scale=0.001, hidden_dim=hdims, reg=reg, dropout=0.5) solver = Solver(model, data, num_epochs=6, batch_size=50, lr_decay=decay, update_rule='adam', optim_config={ 'learning_rate': lr, }, verbose=False, print_every=50) t_start = time() solver.train() val_accs.append(solver.best_val_acc) if solver.best_val_acc > best_val_acc: best_val_acc = solver.best_val_acc best_model = model best_solver = solver print 'acc\t#filts\thdims\treg\tlr\tTime:' print '%f\t%d\t%d\t%f\t%f\t%fm' % (solver.best_val_acc,feat,hdims,reg,lr,(time() - t_start)/60.0) #plot_solver(solver) plot_solver(best_solver) #print('\a') # Sound a bell #print('\a') #print('\a') print best_solver.best_val_acc print best_model.reg print best_solver.optim_config['learning_rate'] print val_accs def testDescribe(): model = load_malpi('SimpleTest1.pickle') model.describe() def getOneImage(imsize): image = ndimage.imread('test_data/image.jpeg') #image.shape (480, 720, 3) image = image.transpose(2,1,0) # shape = (3, 720, 480) min = (720 - 480) / 2 image = image[:,min:min+480,:] image = misc.imresize(image,(imsize,imsize)) # shape = (3, 480, 480) image = image.reshape(1,3,imsize,imsize) return image # input_dim: Tuple (C, H, W) giving size of input data. def softmax(x): probs = np.exp(x - np.max(x)) probs /= np.sum(probs ) return probs def speedTest( load=False ): # 0.5s for a single forward pass: #imsize = 239 # layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32", "fc-10"] # layer_params = [{'filter_size':3, 'stride':2, 'pad':1 }, {'pool_stride':4, 'pool_width':4, 'pool_height':4}, # {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2}, # {'filter_size':3}, # {'relu':False}] name = "MalpiThree_v1" imsize = 79 image = getOneImage(imsize) image = image.astype(np.float32) if load: model = load_malpi(name+'.pickle') else: layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32"] layer_params = [{'filter_size':3, 'stride':2, 'pad':1 }, {'pool_stride':2, 'pool_width':2, 'pool_height':2}, {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2}, {'filter_size':3} ] model = MalpiConvNet(layers, layer_params, input_dim=(3,imsize,imsize), reg=.005, dtype=np.float32, verbose=True) model.describe() N = 1 D = 32*10*10 H = 500 nA = 5 lstm_model = MalpiLSTM( D, H, nA, dtype=np.float32 ) t_start = time() count = 10 for x in range(count): cnn_out = model.loss(image) lstm_x = np.reshape( cnn_out, (1,D) ) actions = lstm_model.loss(lstm_x) #print actions print "Avg elapsed time: %f" % ((time() - t_start)/count,) model.save(name+'.pickle') def describeModel( name ): model = load_malpi(name+'.pickle') # if not hasattr(model, 'input_dim'): # model.input_dim = (3,imsize,imsize) model.describe() # model.save(name+'.pickle') def getOptions(): parser = OptionParser() parser.add_option("-d","--describe",dest="name",help="Describe a model saved in a pickle file: <name>.pickle"); (options, args) = parser.parse_args() return (options, args) if __name__ == "__main__": (options, args) = getOptions() if options.name: describeModel(options.name) else: speedTest( load=False ) #testload() #testDescribe() #test1()
mit
hobson/pug-invest
pug/invest/models.py
1
4142
from __future__ import print_function

import datetime

import concurrentpandas as ccp
from django.db import models
from jsonfield import JSONField

import util
import nlp

import pandas as pd


class Equity(models.Model):
    name = models.CharField(help_text='Full legal name of business or fund.',
                            max_length=64, null=False, blank=False)
    symbol = models.CharField(max_length=10, null=False, blank=False)
    time_series = JSONField()


class Day(models.Model):
    """Daily time series Data"""
    day = models.IntegerField(help_text="Trading days since Jan 1, 1970", null=True, blank=True)
    close = models.FloatField(null=True, default=None)
    actual_close = models.FloatField(null=True, default=None, blank=True)
    volume = models.FloatField(null=True, default=None, blank=True)
    date = models.DateField(null=False)
    datetime = models.DateTimeField(null=False)
    high = models.FloatField(null=True, default=None, blank=True)
    low = models.FloatField(null=True, default=None, blank=True)
    symbol = models.CharField(max_length=10, null=False, blank=False)
    equity = models.ForeignKey(Equity)


def get_dataframes(symbols=("sne", "goog", "tsla"), source='yahoo', refresh=False):
    """Retrieve table of market data ("Close", "Volume", "Adj Close") for each symbol requested

    >>> dfdict = get_dataframes('GOOG', 'SNE')
    """
    symbols = util.make_symbols(list(symbols))
    if refresh:
        symbols_to_refresh = symbols
    else:
        symbols_to_refresh = [sym for sym in symbols if not Equity.objects.filter(symbol=sym).exists()]
    source = source.lower().strip()
    if source in ('yahoo', 'google'):
        source += '_finance'
    if source[:3] == 'fed':
        source = 'federal_reserve_economic_data'
    ccpanda = ccp.ConcurrentPandas()
    # set the data source
    getattr(ccpanda, "set_source_" + source)()
    if symbols_to_refresh:
        # tell concurrent pandas which keys/symbols to retrieve
        ccpanda.insert_keys(symbols_to_refresh)
        # start concurrentpandas threads
        ccpanda.consume_keys_asynchronous_threads()
        # FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?
        pseudodict = ccpanda.return_map()
    else:
        pseudodict = {}
    table = {}
    for sym in symbols:
        e, created = None, False
        if not sym in symbols_to_refresh:
            e, created = Equity.objects.get_or_create(symbol=sym)
        if created or not e or not e.time_series or sym in symbols_to_refresh:
            e, created = Equity.objects.get_or_create(
                symbol=sym,
                name=sym,  # FIXME: use data source to find equity name!
                time_series=pseudodict[sym].to_json(),
                )
        table[sym] = pd.io.json.read_json(path_or_buf=e.time_series, orient='columns', typ='frame', convert_dates=True)
    return table


def get_panel(*args, **kwargs):
    return pd.Panel(get_dataframes(*args, **kwargs))


def price_dataframe(symbols=('sne',),
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',
                    cleaner=util.clean_dataframe,
                    ):
    """Retrieve the prices of a list of equities as a DataFrame (columns = symbols)

    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
        e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"]
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
Yahoo data stops at 2013/1/1 """ if isinstance(price_type, basestring): price_type = [price_type] start = nlp.util.normalize_date(start or datetime.date(2008, 1, 1)) end = nlp.util.normalize_date(end or datetime.date(2009, 12, 31)) symbols = util.make_symbols(symbols) df = get_dataframes(symbols) # t = du.getNYSEdays(start, end, datetime.timedelta(hours=16)) # df = clean_dataframes(dataobj.get_data(t, symbols, price_type)) if not df or len(df) > 1: return cleaner(df) else: return cleaner(df[0])
mit
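get_dataframes in the model file above persists each symbol's price table by serialising the DataFrame to JSON into the Equity.time_series field and parsing it back on read. The round trip on its own looks roughly like this (a sketch with made-up prices, using io.StringIO and pandas.read_json rather than the older pd.io.json.read_json call):

import io
import pandas as pd

prices = pd.DataFrame(
    {"Close": [21.5, 21.9, 22.1], "Volume": [1.2e6, 9.8e5, 1.1e6]},
    index=pd.date_range("2009-01-05", periods=3),
)

serialized = prices.to_json()                     # what would be stored in the JSONField
restored = pd.read_json(io.StringIO(serialized))  # what get_dataframes hands back
print(restored)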
cl4rke/scikit-learn
examples/classification/plot_lda.py
164
2224
""" ==================================================================== Normal and Shrinkage Linear Discriminant Analysis for classification ==================================================================== Shows how shrinkage improves classification. """ from __future__ import division import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_blobs from sklearn.lda import LDA n_train = 20 # samples for training n_test = 200 # samples for testing n_averages = 50 # how often to repeat classification n_features_max = 75 # maximum number of features step = 4 # step size for the calculation def generate_data(n_samples, n_features): """Generate random blob-ish data with noisy features. This returns an array of input data with shape `(n_samples, n_features)` and an array of `n_samples` target labels. Only one feature contains discriminative information, the other features contain only noise. """ X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]]) # add non-discriminative features if n_features > 1: X = np.hstack([X, np.random.randn(n_samples, n_features - 1)]) return X, y acc_clf1, acc_clf2 = [], [] n_features_range = range(1, n_features_max + 1, step) for n_features in n_features_range: score_clf1, score_clf2 = 0, 0 for _ in range(n_averages): X, y = generate_data(n_train, n_features) clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y) clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y) X, y = generate_data(n_test, n_features) score_clf1 += clf1.score(X, y) score_clf2 += clf2.score(X, y) acc_clf1.append(score_clf1 / n_averages) acc_clf2.append(score_clf2 / n_averages) features_samples_ratio = np.array(n_features_range) / n_train plt.plot(features_samples_ratio, acc_clf1, linewidth=2, label="LDA with shrinkage", color='r') plt.plot(features_samples_ratio, acc_clf2, linewidth=2, label="LDA", color='g') plt.xlabel('n_features / n_samples') plt.ylabel('Classification accuracy') plt.legend(loc=1, prop={'size': 12}) plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)') plt.show()
bsd-3-clause
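The example above repeats the fit many times to draw a curve; the underlying comparison is just two estimators on the same noisy blobs. A minimal sketch using the current import path (sklearn.discriminant_analysis.LinearDiscriminantAnalysis instead of the deprecated sklearn.lda.LDA used above):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

def noisy_blobs(n_samples, n_features, seed):
    # one informative feature, the rest pure noise
    X, y = make_blobs(n_samples=n_samples, n_features=1,
                      centers=[[-2], [2]], random_state=seed)
    X = np.hstack([X, np.random.RandomState(seed).randn(n_samples, n_features - 1)])
    return X, y

X_train, y_train = noisy_blobs(20, 50, seed=0)
X_test, y_test = noisy_blobs(200, 50, seed=1)

shrunk = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X_train, y_train)
plain = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X_train, y_train)
print("with shrinkage:   ", shrunk.score(X_test, y_test))
print("without shrinkage:", plain.score(X_test, y_test))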