repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kiaakrami/An-Alternative-FWI-AFWI-Algorithm-for-Monitoring-Time-lapse-Velocity-Changes | data_generation.py | 1 | 7604 | ####### We start by importing some useful packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
import sys, getopt
import copy
from pysit import *
from pysit.gallery import marmousi
from pysit.util.parallel import *
from mpi4py import MPI
from scipy.io import savemat, loadmat
####### Here we define the plots
def plot_func(fig_nr, arr_2d, x_min, x_max, z_min, z_max, x_label, z_label, title, cbar_min=None, cbar_max=None):
fig = plt.figure(fig_nr)
ax = fig.add_subplot(111)
im = ax.imshow(arr_2d, extent=[x_min,x_max,z_max,z_min], interpolation="nearest")
im.axes.yaxis.set_label_text(z_label, fontsize = 10)
im.axes.xaxis.set_label_text(x_label, fontsize = 10)
im.axes.set_title(title, fontsize = 10)
if cbar_min is not None and cbar_max is not None:
norm = mpl.colors.Normalize(vmin=cbar_min, vmax=cbar_max)
im.set_norm(norm)
cb = plt.colorbar(im, ticks=np.linspace(cbar_min, cbar_max, 5))
else:
cb = plt.colorbar(im)
return fig
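# Minimal usage sketch for plot_func (editorial illustration only -- the array and axis
# limits below are made-up values, not part of the original script):
#   demo_model = 1500.0 + 3000.0*np.random.rand(100, 300)   # hypothetical (z, x) velocity grid
#   fig = plot_func(1, demo_model, 0.0, 9.2, 0.0, 3.0,
#                   'Horizontal coordinate (km)', 'Depth (km)', 'Demo model',
#                   cbar_min=1500.0, cbar_max=4600.0)
#   plt.show()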
####### Here we define parallel shots
def make_parallel_shots(pwrap, nsources, x_pos_sources_arr_all, z_pos_sources, x_pos_receivers_arr_all, z_pos_receivers, peakfreq):
min_nr_per_process = nsources / pwrap.size
nr_leftover_processes = nsources % (min_nr_per_process * pwrap.size)
nr_shots_this_process = min_nr_per_process
if pwrap.rank < nr_leftover_processes:
nr_shots_this_process += 1
local_shots = []
local_shots_indices = []
for i in xrange(nr_shots_this_process):
all_shot_index = i*pwrap.size + pwrap.rank
print "CREATING SHOT WITH INDEX: %i"%all_shot_index
source = PointSource(m, (x_pos_sources_arr_all[all_shot_index], z_pos_sources), RickerWavelet(peakfreq), approximation='gaussian')
####### Here we define the set of receivers
receivers = ReceiverSet(m, [PointReceiver(m, (x, z_pos_receivers), approximation='gaussian') for x in x_pos_receivers])
####### Here we create and store the shots
shot = Shot(source, receivers)
local_shots.append(shot)
local_shots_indices.append(i)
return local_shots, local_shots_indices
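# Editorial note on the loop above: shots are distributed round-robin over MPI ranks via
# all_shot_index = i * pwrap.size + pwrap.rank. For example (illustrative numbers only),
# with nsources = 19 and 4 ranks, rank 0 builds shots 0, 4, 8, 12, 16, rank 2 builds
# shots 2, 6, 10, 14, 18, and rank 3 builds shots 3, 7, 11, 15.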
if __name__ == '__main__':
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
print "size = %i and rank = %i"%(size, rank)
pwrap = ParallelWrapShot(comm=comm)
####### Here we set the wave speed
WaveSpeed.add_lower_bound(1000.0)
WaveSpeed.add_upper_bound(6500.0)
x_lbc = PML(600.0,100.0); x_rbc = PML(600.0,100.0); z_lbc = PML(600.0,100.0); z_rbc = PML(600.0,100.0)
C, C0, m, d = marmousi(patch='mini_square', x_lbc = x_lbc, x_rbc = x_rbc, z_lbc = z_lbc, z_rbc = z_rbc)
n_nodes_x = m.x.n
n_nodes_z = m.z.n
dx = m.x.delta
dz = m.z.delta
x_min = d.x.lbound
x_max = d.x.rbound
z_min = d.z.lbound
z_max = d.z.rbound
x_min_km = x_min/1000.0; x_max_km = x_max/1000.0; z_min_km = z_min/1000.0; z_max_km = z_max/1000.0
x_label = 'Horizontal coordinate (km)'
z_label = 'Depth (km)'
title_marm_baseline_true = 'True Marmousi baseline'
title_marm_init = 'Initial Marmousi'
cbar_min_vel = 1500.0
cbar_max_vel = 4600.0
nsources = 19
nreceiver = n_nodes_x
source_spacing = 480.0
x_pos_sources_baseline = np.arange(0.5*source_spacing, x_max, source_spacing)
x_pos_sources_monitor = x_pos_sources_baseline - 240.0
z_pos_sources = z_min + dz
x_pos_receivers = np.linspace(x_min, x_max, n_nodes_x)
z_pos_receivers = z_min + dz
peakfreq = 6.0
local_shots_baseline, local_shots_baseline_indices = make_parallel_shots(pwrap, nsources, x_pos_sources_baseline, z_pos_sources, x_pos_receivers, z_pos_receivers, peakfreq)
local_shots_monitor , local_shots_monitor_indices = make_parallel_shots(pwrap, nsources, x_pos_sources_monitor , z_pos_sources, x_pos_receivers, z_pos_receivers, peakfreq)
trange = (0.0, 7.0)
solver = ConstantDensityAcousticWave(m,
spatial_accuracy_order=6,
trange=trange,
kernel_implementation='cpp')
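# Editorial note (assumption): the 2D arrays marm_baseline_true_2d, marm_monitor_true_2d and
# marm_baseline_initial_inverted_2d used below are not defined in this excerpt; they are
# presumably (n_nodes_z, n_nodes_x) velocity models loaded earlier, e.g. with the imported
# scipy.io.loadmat. The exact file names are not recoverable from this file.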
marm_baseline_true = np.reshape(marm_baseline_true_2d, (n_nodes_z*n_nodes_x, 1), 'F')
marm_monitor_true = np.reshape( marm_monitor_true_2d, (n_nodes_z*n_nodes_x, 1), 'F')
marm_baseline_initial_inverted = np.reshape(marm_baseline_initial_inverted_2d, (n_nodes_z*n_nodes_x, 1), 'F')
marm_baseline_true_model = solver.ModelParameters(m,{'C': marm_baseline_true})
marm_monitor_true_model = solver.ModelParameters(m,{'C': marm_monitor_true})
marm_init_model_baseline = solver.ModelParameters(m,{'C': marm_baseline_initial_inverted})
marm_init_model_monitor = copy.deepcopy(marm_init_model_baseline)
marm_init_model = JointModel(marm_init_model_baseline, marm_init_model_monitor)
tt = time.time()
generate_seismic_data(local_shots_baseline, solver, marm_baseline_true_model)
print 'Baseline data generation: {0}s'.format(time.time()-tt)
tt = time.time()
generate_seismic_data(local_shots_monitor, solver, marm_monitor_true_model)
print 'Monitor data generation: {0}s'.format(time.time()-tt)
####### Inversion algorithm
objective = TemporalLeastSquares(solver, parallel_wrap_shot=pwrap)
invalg_joint = LBFGS(objective, memory_length=6)
tt = time.time()
nsteps = 30
status_configuration = {'value_frequency' : 1,
'residual_length_frequency' : 1,
'objective_frequency' : 1,
'step_frequency' : 1,
'step_length_frequency' : 1,
'gradient_frequency' : 1,
'gradient_length_frequency' : 1,
'run_time_frequency' : 1,
'alpha_frequency' : 1,
}
line_search = 'backtrack'
if rank == 0:
print "backtrack linesearch is not optimal"
beta = np.reshape(beta_2d, (n_nodes_z*n_nodes_x,1),'F')
beta_normalize = beta/beta.max()
beta_min = 0.001*np.max(beta_normalize)
beta_normalize[np.where(beta_normalize <= beta_min)] = beta_min
model_reg_term_scale = 1e4
result = invalg_joint(local_shots_baseline, local_shots_monitor, beta_normalize, model_reg_term_scale, marm_init_model, nsteps,line_search=line_search,status_configuration=status_configuration, verbose=True)
result_baseline_model = result.m_0
result_monitor_model = result.m_1
if rank == 0:
###### Saving the results
marm_baseline_inverted_2d = result_baseline_model.C.reshape((n_nodes_z,n_nodes_x), order='F')
marm_monitor_inverted_2d = result_monitor_model.C.reshape( (n_nodes_z,n_nodes_x), order='F')
marm_diff_inverted_2d = marm_monitor_inverted_2d - marm_baseline_inverted_2d
out = {'marm_baseline_inverted_bounded_2d':marm_baseline_inverted_2d, 'marm_monitor_inverted_bounded_2d':marm_monitor_inverted_2d, 'marm_diff_inverted_bounded_2d':marm_diff_inverted_2d}
savemat('joint_output_' + str(nsteps) + '_model_reg_term_scale_' + str(model_reg_term_scale) + '_beta_min_' + str(beta_min) + '.mat',out)
print '...run time: {0}s'.format(time.time()-tt)
| gpl-3.0 |
vsoch/nidmviewer | nidmviewer/templates.py | 1 | 2863 | '''
templates.py: part of the nidmviewer package
Functions to work with html templates
Copyright (c) 2014-2018, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from nidmviewer.utils import get_package_dir
import pandas
import os
import re
def get_template(html_name):
return read_template(html_name)
# Add code string to end of template
def add_javascript_function(function_code,template):
template.append("<script>\n%s\n</script>" % (function_code))
return template
# Remove scripts (css or js) from html_snippet
def remove_resources(html_snippet,script_names):
expression = re.compile("|".join(script_names))
filtered_template = [x for x in html_snippet if not expression.search(x)]
return filtered_template
def save_template(html_snippet,output_file):
filey = open(output_file,"wb")
filey.writelines(html_snippet)
filey.close()
def read_template(html_name):
ppwd = get_package_dir()
html_name = html_name + ".html"
template_file = os.path.abspath(os.path.join(ppwd,'template', html_name))
filey = open(template_file,"r")
template = "".join(filey.readlines())
filey.close()
return template
def add_string(tag,substitution,template):
template = template.replace(tag,substitution)
return template
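# Example of how these helpers are typically combined (editorial sketch; the template name
# and tag below are hypothetical, not taken from this package):
#   template = get_template("index")                      # reads template/index.html
#   template = add_string("[SUB_TITLE_SUB]", "My viewer", template)
#   save_template(template, "/tmp/index.html")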
'''Get an image by name in the img directory'''
def get_image(image_name):
ppwd = get_package_dir()
return os.path.join(ppwd,'img', image_name)
| bsd-3-clause |
sergeyk/vislab | vislab/dataset_viz.py | 4 | 5470 | import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import vislab
import vislab.dataset_stats
import vislab.gg
def plot_column_frequencies(df, column, top_k=20):
"""
Plot bar chart of frequencies of top_k values of a column in the df.
"""
column_vals = df[column].value_counts()[:top_k]
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(111)
column_vals.plot(ax=ax, kind='bar', title='{} Frequency'.format(column))
ax.set_xlabel('')
fig.autofmt_xdate()
vislab.gg.rstyle(ax)
return fig
def plot_conditional_occurrence(
df_m, size=None, cmap=plt.cm.gray_r, color_anchor=[0, 1],
x_tick_rot=90, title=None, plot_vals=True, sort_by_prior=True,
font_size=12):
"""
Plot the occurrence of the columns of the given DataFrame
conditioned on the occurrence of its rows.
Each row therefore sums to 1, excepting the last column, which
is the prior probability of the row value.
Parameters
----------
df_m: pandas.DataFrame
Cells contain joint occurrences between index and column.
size: tuple [None]
Optional argument to figsize.
cmap: matplotlib.cmap [gray]
color_anchor: ?
x_tick_rot: float
title: string
plot_vals: bool [True]
If true, actual values are plotted.
sort_by_prior: bool [True]
"""
df_m = vislab.dataset_stats.condition_df_on_row(df_m)
if sort_by_prior:
df_m = df_m.sort('prior', ascending=False)
fig = plot_occurrence(
df_m, size, cmap, color_anchor, x_tick_rot, title, plot_vals, font_size)
ax = fig.get_axes()[0]
# Plot line separating 'nothing' and 'prior' from rest of plot
M, N = df_m.shape
l = ax.add_line(mpl.lines.Line2D(
[N - 1.5, N - 1.5], [-.5, M - 0.5],
ls='--', c='gray', lw=2))
l.set_zorder(3)
return fig
def plot_occurrence(
df_m, size=None, cmap=plt.cm.gray_r, color_anchor=[0, 1],
x_tick_rot=90, title=None, plot_vals=True, font_size=12):
"""
TODO
"""
M, N = df_m.shape
# Initialize figure of given size.
if size is None:
w = max(12, N)
h = max(12, M)
size = (w, h)
fig = plt.figure(figsize=size)
ax_im = fig.add_subplot(111)
# Make axes for colorbar.
divider = make_axes_locatable(ax_im)
ax_cb = divider.new_vertical(size="5%", pad=0.1, pack_start=True)
fig.add_axes(ax_cb)
# The call to imshow produces the matrix plot.
im = ax_im.imshow(df_m, origin='upper', interpolation='nearest',
vmin=color_anchor[0], vmax=color_anchor[1], cmap=cmap)
# Formatting.
ax = ax_im
ax.set_xticks(np.arange(N))
ax.set_xticklabels(df_m.columns)
for tick in ax.xaxis.iter_ticks():
tick[0].label2On = True
tick[0].label1On = False
tick[0].label2.set_rotation(x_tick_rot)
#tick[0].label2.set_fontsize('x-large')
ax.set_yticks(np.arange(M))
ax.set_yticklabels(df_m.index)
ax.yaxis.set_minor_locator(
mpl.ticker.FixedLocator(np.arange(-.5, M + 0.5)))
ax.xaxis.set_minor_locator(
mpl.ticker.FixedLocator(np.arange(-.5, N - 0.5)))
ax.grid(False, which='major')
ax.grid(True, which='minor', ls='-', lw=7, c='w')
# Make the major and minor tick marks invisible
for line in ax.xaxis.get_ticklines() + ax.yaxis.get_ticklines():
line.set_markeredgewidth(0)
for line in ax.xaxis.get_minorticklines() + ax.yaxis.get_minorticklines():
line.set_markeredgewidth(0)
# Limit the area of the plot
ax.set_ybound([-0.5, M - 0.5])
ax.set_xbound([-0.5, N - 0.5])
# The following produces the colorbar and sets the ticks
# Set the ticks - if 0 is in the interval of values, set that, as well
# as the maximal and minimal values:
# Extract the minimum and maximum values for scaling
max_val = np.nanmax(df_m)
min_val = np.nanmin(df_m)
if min_val < 0:
ticks = [color_anchor[0], min_val, 0, max_val, color_anchor[1]]
# Otherwise - only set the maximal value:
else:
ticks = [color_anchor[0], max_val, color_anchor[1]]
# Display the actual values in the cells
if plot_vals:
for i in xrange(0, M):
for j in xrange(0, N):
val = float(df_m.iloc[i, j])
if np.isnan(val):
continue
if val / (color_anchor[1] - color_anchor[0]) > 0.5:
ax.text(j - 0.25, i + 0.1, '%.2f' % val, color='w', size=font_size-2)
else:
ax.text(j - 0.25, i + 0.1, '%.2f' % val, color='k', size=font_size-2)
# Hide the black frame around the plot
# Doing ax.set_frame_on(False) results in weird thin lines
# from imshow() at the edges. Instead, we set the frame to white.
for spine in ax.spines.values():
spine.set_edgecolor('w')
# Set title
if title is not None:
ax.set_title(title)
# Plot the colorbar and remove its frame as well.
cb = fig.colorbar(im, cax=ax_cb, orientation='horizontal',
cmap=cmap, ticks=ticks, format='%.2f')
cb.ax.artists.remove(cb.outline)
# Set fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(font_size)
return fig
| bsd-2-clause |
CVML/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
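# Worked example (editorial illustration, not part of the original module): for the boolean
# indicator vectors
#   a_rows = np.array([1, 1, 0]); a_cols = np.array([1, 0, 1])
#   b_rows = np.array([1, 0, 0]); b_cols = np.array([1, 0, 1])
# the intersection has 1 * 2 = 2 elements, |a| = 2 * 2 = 4 and |b| = 1 * 2 = 2, so
# _jaccard(a_rows, a_cols, b_rows, b_cols) == 2 / (4 + 2 - 2) == 0.5.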
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
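# Usage sketch (editorial illustration): biclusters are passed as (rows, columns) tuples of
# indicator vectors, one row per bicluster.
#   a = (np.array([[True, False, False, True]]), np.array([[True, True, False]]))
#   b = (np.array([[True, False, False, True]]), np.array([[True, True, False]]))
#   consensus_score(a, b)   # identical one-bicluster sets -> 1.0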
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/datetimes.py | 15 | 26546 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Date/Time related functions on pandas-on-Spark Series
"""
from typing import Any, Optional, Union, TYPE_CHECKING, no_type_check
import numpy as np # noqa: F401 (SPARK-34943)
import pandas as pd # noqa: F401
from pandas.tseries.offsets import DateOffset
import pyspark.sql.functions as F
from pyspark.sql.types import DateType, TimestampType, LongType
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class DatetimeMethods(object):
"""Date/Time methods for pandas-on-Spark Series"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.spark.data_type, (DateType, TimestampType)):
raise ValueError(
"Cannot call DatetimeMethods on type {}".format(series.spark.data_type)
)
self._data = series
# Properties
@property
def date(self) -> "ps.Series":
"""
Returns a Series of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# TODO: Hit a weird exception
# syntax error in attribute name: `to_date(`start_date`)` with alias
return self._data.spark.transform(F.to_date)
@property
def time(self) -> "ps.Series":
raise NotImplementedError()
@property
def timetz(self) -> "ps.Series":
raise NotImplementedError()
@property
def year(self) -> "ps.Series":
"""
The year of the datetime.
"""
return self._data.spark.transform(lambda c: F.year(c).cast(LongType()))
@property
def month(self) -> "ps.Series":
"""
The month of the timestamp as January = 1 December = 12.
"""
return self._data.spark.transform(lambda c: F.month(c).cast(LongType()))
@property
def day(self) -> "ps.Series":
"""
The days of the datetime.
"""
return self._data.spark.transform(lambda c: F.dayofmonth(c).cast(LongType()))
@property
def hour(self) -> "ps.Series":
"""
The hours of the datetime.
"""
return self._data.spark.transform(lambda c: F.hour(c).cast(LongType()))
@property
def minute(self) -> "ps.Series":
"""
The minutes of the datetime.
"""
return self._data.spark.transform(lambda c: F.minute(c).cast(LongType()))
@property
def second(self) -> "ps.Series":
"""
The seconds of the datetime.
"""
return self._data.spark.transform(lambda c: F.second(c).cast(LongType()))
@property
def microsecond(self) -> "ps.Series":
"""
The microseconds of the datetime.
"""
@no_type_check
def pandas_microsecond(s) -> "ps.Series[np.int64]":
return s.dt.microsecond
return self._data.pandas_on_spark.transform_batch(pandas_microsecond)
@property
def nanosecond(self) -> "ps.Series":
raise NotImplementedError()
@property
def week(self) -> "ps.Series":
"""
The week ordinal of the year.
"""
return self._data.spark.transform(lambda c: F.weekofyear(c).cast(LongType()))
@property
def weekofyear(self) -> "ps.Series":
return self.week
weekofyear.__doc__ = week.__doc__
@property
def dayofweek(self) -> "ps.Series":
"""
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor).
Returns
-------
Series
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = ps.from_pandas(pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series())
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
dtype: int64
"""
@no_type_check
def pandas_dayofweek(s) -> "ps.Series[np.int64]":
return s.dt.dayofweek
return self._data.pandas_on_spark.transform_batch(pandas_dayofweek)
@property
def weekday(self) -> "ps.Series":
return self.dayofweek
weekday.__doc__ = dayofweek.__doc__
@property
def dayofyear(self) -> "ps.Series":
"""
The ordinal day of the year.
"""
@no_type_check
def pandas_dayofyear(s) -> "ps.Series[np.int64]":
return s.dt.dayofyear
return self._data.pandas_on_spark.transform_batch(pandas_dayofyear)
@property
def quarter(self) -> "ps.Series":
"""
The quarter of the date.
"""
@no_type_check
def pandas_quarter(s) -> "ps.Series[np.int64]":
return s.dt.quarter
return self._data.pandas_on_spark.transform_batch(pandas_quarter)
@property
def is_month_start(self) -> "ps.Series":
"""
Indicates whether the date is the first day of the month.
Returns
-------
Series
For Series, returns a Series with boolean values.
See Also
--------
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> s = ps.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
"""
@no_type_check
def pandas_is_month_start(s) -> "ps.Series[bool]":
return s.dt.is_month_start
return self._data.pandas_on_spark.transform_batch(pandas_is_month_start)
@property
def is_month_end(self) -> "ps.Series":
"""
Indicates whether the date is the last day of the month.
Returns
-------
Series
For Series, returns a Series with boolean values.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> s = ps.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
"""
@no_type_check
def pandas_is_month_end(s) -> "ps.Series[bool]":
return s.dt.is_month_end
return self._data.pandas_on_spark.transform_batch(pandas_is_month_end)
@property
def is_quarter_start(self) -> "ps.Series":
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> df = ps.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df
dates
0 2017-03-30
1 2017-03-31
2 2017-04-01
3 2017-04-02
>>> df.dates.dt.quarter
0 1
1 1
2 2
3 2
Name: dates, dtype: int64
>>> df.dates.dt.is_quarter_start
0 False
1 False
2 True
3 False
Name: dates, dtype: bool
"""
@no_type_check
def pandas_is_quarter_start(s) -> "ps.Series[bool]":
return s.dt.is_quarter_start
return self._data.pandas_on_spark.transform_batch(pandas_is_quarter_start)
@property
def is_quarter_end(self) -> "ps.Series":
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> df = ps.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df
dates
0 2017-03-30
1 2017-03-31
2 2017-04-01
3 2017-04-02
>>> df.dates.dt.quarter
0 1
1 1
2 2
3 2
Name: dates, dtype: int64
>>> df.dates.dt.is_quarter_end
0 False
1 True
2 False
3 False
Name: dates, dtype: bool
"""
@no_type_check
def pandas_is_quarter_end(s) -> "ps.Series[bool]":
return s.dt.is_quarter_end
return self._data.pandas_on_spark.transform_batch(pandas_is_quarter_end)
@property
def is_year_start(self) -> "ps.Series":
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates = ps.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
"""
@no_type_check
def pandas_is_year_start(s) -> "ps.Series[bool]":
return s.dt.is_year_start
return self._data.pandas_on_spark.transform_batch(pandas_is_year_start)
@property
def is_year_end(self) -> "ps.Series":
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series
The same type as the original data with boolean values. Series will
have the same name and index.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates = ps.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
"""
@no_type_check
def pandas_is_year_end(s) -> "ps.Series[bool]":
return s.dt.is_year_end
return self._data.pandas_on_spark.transform_batch(pandas_is_year_end)
@property
def is_leap_year(self) -> "ps.Series":
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor.
>>> dates_series = ps.Series(pd.date_range("2012-01-01", "2015-01-01", freq="Y"))
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
"""
@no_type_check
def pandas_is_leap_year(s) -> "ps.Series[bool]":
return s.dt.is_leap_year
return self._data.pandas_on_spark.transform_batch(pandas_is_leap_year)
@property
def daysinmonth(self) -> "ps.Series":
"""
The number of days in the month.
"""
@no_type_check
def pandas_daysinmonth(s) -> "ps.Series[np.int64]":
return s.dt.daysinmonth
return self._data.pandas_on_spark.transform_batch(pandas_daysinmonth)
@property
def days_in_month(self) -> "ps.Series":
return self.daysinmonth
days_in_month.__doc__ = daysinmonth.__doc__
# Methods
@no_type_check
def tz_localize(self, tz) -> "ps.Series":
"""
Localize tz-naive Datetime column to tz-aware Datetime column.
"""
# Neither tz-naive or tz-aware datetime exists in Spark
raise NotImplementedError()
@no_type_check
def tz_convert(self, tz) -> "ps.Series":
"""
Convert tz-aware Datetime column from one time zone to another.
"""
# tz-aware datetime doesn't exist in Spark
raise NotImplementedError()
def normalize(self) -> "ps.Series":
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array.
Returns
-------
Series
The same type as the original data. Series will have the same
name and index.
See Also
--------
floor : Floor the series to the specified freq.
ceil : Ceil the series to the specified freq.
round : Round the series to the specified freq.
Examples
--------
>>> series = ps.Series(pd.Series(pd.date_range('2012-1-1 12:45:31', periods=3, freq='M')))
>>> series.dt.normalize()
0 2012-01-31
1 2012-02-29
2 2012-03-31
dtype: datetime64[ns]
"""
@no_type_check
def pandas_normalize(s) -> "ps.Series[np.datetime64]":
return s.dt.normalize()
return self._data.pandas_on_spark.transform_batch(pandas_normalize)
def strftime(self, date_format: str) -> "ps.Series":
"""
Convert to a string Series using specified date_format.
Return a Series of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in python string format
doc.
Parameters
----------
date_format : str
Date format string (example: "%%Y-%%m-%%d").
Returns
-------
Series
Series of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
normalize : Return series with times to midnight.
round : Round the series to the specified freq.
floor : Floor the series to the specified freq.
Examples
--------
>>> series = ps.Series(pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s'))
>>> series
0 2018-03-10 09:00:00
1 2018-03-10 09:00:01
2 2018-03-10 09:00:02
dtype: datetime64[ns]
>>> series.dt.strftime('%B %d, %Y, %r')
0 March 10, 2018, 09:00:00 AM
1 March 10, 2018, 09:00:01 AM
2 March 10, 2018, 09:00:02 AM
dtype: object
"""
@no_type_check
def pandas_strftime(s) -> "ps.Series[str]":
return s.dt.strftime(date_format)
return self._data.pandas_on_spark.transform_batch(pandas_strftime)
def round(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform round operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_round(s) -> "ps.Series[np.datetime64]":
return s.dt.round(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_round)
def floor(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform floor operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to floor the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_floor(s) -> "ps.Series[np.datetime64]":
return s.dt.floor(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_floor)
def ceil(self, freq: Union[str, DateOffset], *args: Any, **kwargs: Any) -> "ps.Series":
"""
Perform ceil operation on the data to the specified freq.
Parameters
----------
freq : str or Offset
The frequency level to round the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end).
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. note:: this option only works with pandas 0.24.0+
Returns
-------
Series
a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
>>> series = ps.Series(pd.date_range('1/1/2018 11:59:00', periods=3, freq='min'))
>>> series
0 2018-01-01 11:59:00
1 2018-01-01 12:00:00
2 2018-01-01 12:01:00
dtype: datetime64[ns]
>>> series.dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
@no_type_check
def pandas_ceil(s) -> "ps.Series[np.datetime64]":
return s.dt.ceil(freq, *args, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_ceil)
def month_name(self, locale: Optional[str] = None) -> "ps.Series":
"""
Return the month names of the series with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Series
Series of month names.
Examples
--------
>>> series = ps.Series(pd.date_range(start='2018-01', freq='M', periods=3))
>>> series
0 2018-01-31
1 2018-02-28
2 2018-03-31
dtype: datetime64[ns]
>>> series.dt.month_name()
0 January
1 February
2 March
dtype: object
"""
@no_type_check
def pandas_month_name(s) -> "ps.Series[str]":
return s.dt.month_name(locale=locale)
return self._data.pandas_on_spark.transform_batch(pandas_month_name)
def day_name(self, locale: Optional[str] = None) -> "ps.Series":
"""
Return the day names of the series with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Series
Series of day names.
Examples
--------
>>> series = ps.Series(pd.date_range(start='2018-01-01', freq='D', periods=3))
>>> series
0 2018-01-01
1 2018-01-02
2 2018-01-03
dtype: datetime64[ns]
>>> series.dt.day_name()
0 Monday
1 Tuesday
2 Wednesday
dtype: object
"""
@no_type_check
def pandas_day_name(s) -> "ps.Series[str]":
return s.dt.day_name(locale=locale)
return self._data.pandas_on_spark.transform_batch(pandas_day_name)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.datetimes
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.datetimes.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.datetimes tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.datetimes,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
jseabold/statsmodels | statsmodels/examples/ex_outliers_influence.py | 5 | 3868 |
import numpy as np
import statsmodels.stats.outliers_influence as oi
if __name__ == '__main__':
import statsmodels.api as sm
data = np.array('''\
64 57 8
71 59 10
53 49 6
67 62 11
55 51 8
58 50 7
77 55 10
57 48 9
56 42 10
51 42 6
76 61 12
68 57 9'''.split(), float).reshape(-1,3)
varnames = 'weight height age'.split()
endog = data[:,0]
exog = sm.add_constant(data[:,2])
res_ols = sm.OLS(endog, exog).fit()
hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1)
x = res_ols.model.exog
hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T)))
from numpy.testing import assert_almost_equal
assert_almost_equal(hh, hh_check, decimal=13)
res = res_ols #alias
#http://en.wikipedia.org/wiki/PRESS_statistic
#predicted residuals, leave one out predicted residuals
resid_press = res.resid / (1-hh)
ess_press = np.dot(resid_press, resid_press)
sigma2_est = np.sqrt(res.mse_resid) #can be replaced by different estimators of sigma
sigma_est = np.sqrt(sigma2_est)
resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh)
#http://en.wikipedia.org/wiki/DFFITS:
dffits = resid_studentized * np.sqrt(hh / (1 - hh))
nobs, k_vars = res.model.exog.shape
#Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS)
dffits_threshold = 2 * np.sqrt(k_vars/nobs)
res_ols.df_modelwc = res_ols.df_model + 1
n_params = res.model.exog.shape[1]
#http://en.wikipedia.org/wiki/Cook%27s_distance
cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2
#or
#Eubank p.93, 94
cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh)
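#editorial note: cooks_d and cooks_d2 are algebraically identical here, since
#resid_studentized**2 = resid**2 / (sigma2_est * (1 - hh)), so both expressions reduce to
#resid**2 / sigma2_est / df_modelwc * hh / (1 - hh)**2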
#threshold if normal, also Wikipedia
from scipy import stats
alpha = 0.1
#df looks wrong
print(stats.f.isf(1-alpha, n_params, res.df_resid))
print(stats.f.sf(cooks_d, n_params, res.df_resid))
print('Cooks Distance')
print(cooks_d)
print(cooks_d2)
doplot = 0
if doplot:
import matplotlib.pyplot as plt
fig = plt.figure()
# ax = fig.add_subplot(3,1,1)
# plt.plot(andrew_results.weights, 'o', label='rlm weights')
# plt.legend(loc='lower left')
ax = fig.add_subplot(3,1,2)
plt.plot(cooks_d, 'o', label="Cook's distance")
plt.legend(loc='upper left')
ax2 = fig.add_subplot(3,1,3)
plt.plot(resid_studentized, 'o', label='studentized_resid')
plt.plot(dffits, 'o', label='DFFITS')
leg = plt.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5) #, fontsize='small')
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize='small') # the legend text fontsize
print(oi.reset_ramsey(res, degree=3))
#note, constant in last column
for i in range(1):
print(oi.variance_inflation_factor(res.model.exog, i))
infl = oi.OLSInfluence(res_ols)
print(infl.resid_studentized_external)
print(infl.resid_studentized_internal)
print(infl.summary_table())
print(oi.summary_table(res, alpha=0.05)[0])
'''
>>> res.resid
array([ 4.28571429, 4. , 0.57142857, -3.64285714,
-4.71428571, 1.92857143, 10. , -6.35714286,
-11. , -1.42857143, 1.71428571, 4.64285714])
>>> infl.hat_matrix_diag
array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034,
0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429,
0.33613445, 0.08403361])
>>> infl.resid_press
array([ 4.76635514, 4.53333333, 0.8 , -4.56315789,
-5.24299065, 2.31818182, 11.33333333, -6.94036697,
-12.46666667, -2. , 2.58227848, 5.06880734])
>>> infl.ess_press
465.98646628086374
'''
| bsd-3-clause |
russel1237/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a reasonably calm time period (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
nghiattran/self-steering | eval.py | 1 | 2352 | from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
BINS = np.linspace(-2, 2, 100)
def load_data(filepath):
data = pd.read_csv(filepath, usecols=['frame_id', 'steering_angle'], index_col=None)
data.sort_values('frame_id')
files = data['frame_id'][1:].tolist()
angles = data['steering_angle'][1:].tolist()
return np.array(files), np.array(angles, dtype=np.float32)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Path viewer')
parser.add_argument('input', type=str, help='Path to prediction file.')
parser.add_argument('groundtargets', type=str, help='Path to groundtargets file.')
args = parser.parse_args()
pred_ids, preds = load_data(args.input)
targets_ids, targets = load_data(args.groundtargets)
min_shape = min(preds.shape[0], targets.shape[0])
targets = targets[:min_shape]
preds = preds[:min_shape]
# Sanity check
pred_ids = pred_ids[:min_shape]
targets_ids = targets_ids[:min_shape]
if np.sum(targets_ids - pred_ids) != 0:
print(np.sum(targets_ids - pred_ids) )
print('error')
saved_path = 'REPORT'
error = np.abs(targets - preds)
rmse = np.mean(np.square(error)) ** 0.5
plotfile = os.path.join(saved_path, 'targets_vs_predictions_histogram_step.png')
plt.clf()
plt.xlabel('Steering angle')
plt.ylabel('Frequency')
plt.hist(targets, BINS, alpha=0.5, label='targets')
plt.hist(preds, BINS, alpha=0.5, label='predictions')
plt.legend(loc='upper right')
plt.savefig(plotfile)
plotfile = os.path.join(saved_path, 'targets_vs_predictions_scatter_step.png')
plt.clf()
start = - np.pi
end = np.pi
plt.scatter(targets, preds, s=10)
plt.xlabel('Targets')
plt.ylabel('Predictions')
plt.plot([start, end], [start, end], color='red')
plt.savefig(plotfile)
plotfile = os.path.join(saved_path, 'angles_vs_error_scatter_step.png')
plt.clf()
plt.scatter(targets, error, s=10)
plt.xlabel('Angle')
plt.ylabel('Errors')
plt.savefig(plotfile)
print('Sum error', abs(np.sum(error)))
print('Max error:', np.max(error))
print('Mean error:', np.mean(error))
print('Min error:', np.min(error))
print('Root-mean-square error:', rmse)
| mit |
sarahgrogan/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
rhuelga/sms-tools | lectures/08-Sound-transformations/plots-code/hps-morph.py | 2 | 2709 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
inputFile1='../../../sounds/violin-B3.wav'
window1='blackman'
M1=1001
N1=1024
t1=-100
minSineDur1=0.05
nH=60
minf01=200
maxf01=300
f0et1=10
harmDevSlope1=0.01
stocf=0.1
inputFile2='../../../sounds/soprano-E4.wav'
window2='blackman'
M2=901
N2=1024
t2=-100
minSineDur2=0.05
minf02=250
maxf02=500
f0et2=10
harmDevSlope2=0.01
Ns = 512
H = 128
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
hfreqIntp = np.array([0, .5, 1, .5])
hmagIntp = np.array([0, .5, 1, .5])
stocIntp = np.array([0, .5, 1, .5])
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs1)
UF.wavwrite(y,fs1, 'hps-morph.wav')
plt.figure(figsize=(12, 9))
frame = 200
plt.subplot(2,3,1)
plt.vlines(hfreq1[frame,:], -100, hmag1[frame,:], lw=1.5, color='b')
plt.axis([0,5000, -80, -15])
plt.title('x1: harmonics')
plt.subplot(2,3,2)
plt.vlines(hfreq2[frame,:], -100, hmag2[frame,:], lw=1.5, color='r')
plt.axis([0,5000, -80, -15])
plt.title('x2: harmonics')
plt.subplot(2,3,3)
yhfreq[frame,:][yhfreq[frame,:]==0] = np.nan
plt.vlines(yhfreq[frame,:], -100, yhmag[frame,:], lw=1.5, color='c')
plt.axis([0,5000, -80, -15])
plt.title('y: harmonics')
stocaxis = (fs1/2)*np.arange(stocEnv1[0,:].size)/float(stocEnv1[0,:].size)
plt.subplot(2,3,4)
plt.plot(stocaxis, stocEnv1[frame,:], lw=1.5, marker='x', color='b')
plt.axis([0,20000, -73, -27])
plt.title('x1: stochastic')
plt.subplot(2,3,5)
plt.plot(stocaxis, stocEnv2[frame,:], lw=1.5, marker='x', color='r')
plt.axis([0,20000, -73, -27])
plt.title('x2: stochastic')
plt.subplot(2,3,6)
plt.plot(stocaxis, ystocEnv[frame,:], lw=1.5, marker='x', color='c')
plt.axis([0,20000, -73, -27])
plt.title('y: stochastic')
plt.tight_layout()
plt.savefig('hps-morph.png')
plt.show()
| agpl-3.0 |
jorge2703/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
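# Worked check of the values above (comment added for clarity, assuming the
# standard metric definitions): with y_pred = y_true + 1 every absolute and
# squared error is exactly 1, so MSE = MAE = median AE = 1. For R^2,
# SS_res = n and SS_tot = n * var(y_true), with var(np.arange(50)) =
# (50 ** 2 - 1) / 12 = 208.25, giving R^2 = 1 - 1 / 208.25 ~= 0.9952, which is
# what the 0.995 assertion checks. Explained variance is 1 because a constant
# offset leaves no residual variance.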
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal here because every
# per-element error is either 0 or 1 (binary problem), so squaring changes nothing.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
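# Worked check for the uniform average above (comment added for clarity): the
# per-output R^2 values are [-3.5, -2.0, 1.0, 1.0] (the last output is constant
# and perfectly predicted, which scores 1 by convention), so their plain mean is
# (-3.5 - 2.0 + 1.0 + 1.0) / 4 = -0.875. The variance-weighted score instead
# gives the zero-variance output zero weight, which is why it comes out as
# 1 - 5 / 2 = -1.5.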
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal here because every
# per-element error is either 0 or 1 (binary problem), so squaring changes nothing.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking the condition in which both the numerator and the denominator
# are zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
pianomania/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same;
# we use a subset of digits because iris, blobs and make_classification
# didn't show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure the fancy indexing above really left the indices unsorted
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input,
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
bloyl/mne-python | doc/conf.py | 1 | 51195 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import gc
import os
import sys
import time
import warnings
from datetime import datetime, timezone
from distutils.version import LooseVersion
import numpy as np
import matplotlib
import sphinx
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import mne
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt)
from mne.viz import Brain # noqa
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
f'{sphinx_gallery.__version__}')
matplotlib.use('agg')
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- Project information -----------------------------------------------------
project = 'MNE'
td = datetime.now(tz=timezone.utc)
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
copyright = (
f'2012–{td.year}, MNE Developers. Last updated <time datetime="{td.isoformat()}" class="localized">{td.strftime("%Y-%m-%d %H:%M %Z")}</time>\n' # noqa: E501
'<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>') # noqa: E501
if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true':
copyright = f'2012–{td.year}, MNE Developers. Last updated locally.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'numpydoc',
'sphinx_gallery.gen_gallery',
'gen_commands',
'gh_substitutions',
'mne_substitutions',
'gen_names',
'sphinx_bootstrap_divs',
'sphinxcontrib.bibtex',
'sphinx_copybutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']
# The suffix of source filenames.
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'numba': ('https://numba.pydata.org/numba-doc/latest', None),
'joblib': ('https://joblib.readthedocs.io/en/latest', None),
'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
'nibabel': ('https://nipy.org/nibabel', None),
'nilearn': ('http://nilearn.github.io', None),
'surfer': ('https://pysurfer.github.io/', None),
'mne_bids': ('https://mne.tools/mne-bids/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'seaborn': ('https://seaborn.pydata.org/', None),
'statsmodels': ('https://www.statsmodels.org/dev', None),
'patsy': ('https://patsy.readthedocs.io/en/latest', None),
'pyvista': ('https://docs.pyvista.org', None),
'imageio': ('https://imageio.readthedocs.io/en/latest', None),
'mne_realtime': ('https://mne.tools/mne-realtime', None),
'picard': ('https://pierreablin.github.io/picard/', None),
'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None),
'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None)
}
# NumPyDoc configuration -----------------------------------------------------
# Define what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
numpydoc_xref_param_type = True
numpydoc_xref_aliases = {
# Python
'file-like': ':term:`file-like <python:file object>`',
# Matplotlib
'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
'color': ':doc:`color <matplotlib:api/colors_api>`',
'collection': ':doc:`collections <matplotlib:api/collections_api>`',
'Axes': 'matplotlib.axes.Axes',
'Figure': 'matplotlib.figure.Figure',
'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
# Mayavi
'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
'mlab.Figure': 'mayavi.core.api.Scene',
# sklearn
'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
# joblib
'joblib.Parallel': 'joblib.Parallel',
# nibabel
'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
'SpatialImage': 'nibabel.spatialimages.SpatialImage',
# MNE
'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
'SourceMorph': 'mne.SourceMorph',
'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
'AverageTFR': 'mne.time_frequency.AverageTFR',
'EpochsTFR': 'mne.time_frequency.EpochsTFR',
'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
'DigMontage': 'mne.channels.DigMontage',
'VectorSourceEstimate': 'mne.VectorSourceEstimate',
'VolSourceEstimate': 'mne.VolSourceEstimate',
'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
'MixedSourceEstimate': 'mne.MixedSourceEstimate',
'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
'ConductorModel': 'mne.bem.ConductorModel',
'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
'InverseOperator': 'mne.minimum_norm.InverseOperator',
'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
'SourceMorph': 'mne.SourceMorph',
'Xdawn': 'mne.preprocessing.Xdawn',
'Report': 'mne.Report', 'Forward': 'mne.Forward',
'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
'Vectorizer': 'mne.decoding.Vectorizer',
'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
'TemporalFilter': 'mne.decoding.TemporalFilter',
'SSD': 'mne.decoding.SSD',
'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
'PSDEstimator': 'mne.decoding.PSDEstimator',
'LinearModel': 'mne.decoding.LinearModel',
'FilterEstimator': 'mne.decoding.FilterEstimator',
'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
'Beamformer': 'mne.beamformer.Beamformer',
'Transform': 'mne.transforms.Transform',
}
numpydoc_xref_ignore = {
# words
'instance', 'instances', 'of', 'default', 'shape', 'or',
'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
'dtype', 'object', 'self.verbose',
# shapes
'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg',
'n_moments',
# Undocumented (on purpose)
'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi',
# sklearn subclasses
'mapping', 'to', 'any',
# unlinkable
'mayavi.mlab.pipeline.surface',
'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
# dipy has resolution problems, wait for them to be solved, e.g.
# https://github.com/dipy/dipy/issues/2290
'dipy.align.AffineMap',
'dipy.align.DiffeomorphicMap',
}
numpydoc_validate = True
numpydoc_validation_checks = {'all'} | set(error_ignores)
numpydoc_validation_exclude = { # set of regex
# dict subclasses
r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys',
r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values',
# list subclasses
r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove',
r'\.sort',
# we currently don't document these properly (probably okay)
r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__',
r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__',
# copied from sklearn
r'mne\.utils\.deprecated',
}
# -- Sphinx-gallery configuration --------------------------------------------
class Resetter(object):
"""Simple class to make the str(obj) static for Sphinx build env hash."""
def __init__(self):
self.t0 = time.time()
def __repr__(self):
return f'<{self.__class__.__name__}>'
def __call__(self, gallery_conf, fname):
import matplotlib.pyplot as plt
try:
from pyvista import Plotter # noqa
except ImportError:
Plotter = None # noqa
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
BackgroundPlotter = None # noqa
try:
from vtk import vtkPolyData # noqa
except ImportError:
vtkPolyData = None # noqa
from mne.viz.backends.renderer import backend
_Renderer = backend._Renderer if backend is not None else None
reset_warnings(gallery_conf, fname)
# in case users have interactive mode turned on in matplotlibrc,
# turn it off here (otherwise the build can be very slow)
plt.ioff()
plt.rcParams['animation.embed_limit'] = 30.
gc.collect()
_assert_no_instances(Brain, 'Brain') # calls gc.collect()
if Plotter is not None:
_assert_no_instances(Plotter, 'Plotter')
if BackgroundPlotter is not None:
_assert_no_instances(BackgroundPlotter, 'BackgroundPlotter')
if vtkPolyData is not None:
_assert_no_instances(vtkPolyData, 'vtkPolyData')
_assert_no_instances(_Renderer, '_Renderer')
# This will overwrite some Sphinx printing but it's useful
# for memory timestamps
if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
import psutil
process = psutil.Process(os.getpid())
mem = sizeof_fmt(process.memory_info().rss)
print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22))
examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
mne.viz.set_3d_backend(mne.viz.get_3d_backend())
except Exception:
report_scraper = None
else:
backend = mne.viz.get_3d_backend()
if backend == 'mayavi':
from traits.api import push_exception_handler
mlab = mne.utils._import_mlab()
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
# hack to initialize the Mayavi Engine
mlab.test_plot3d()
mlab.close()
scrapers += ('mayavi',)
push_exception_handler(reraise_exceptions=True)
elif backend in ('notebook', 'pyvista'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
pyvista.OFF_SCREEN = False
brain_scraper = mne.viz._brain._BrainScraper()
scrapers += (brain_scraper, 'pyvista')
report_scraper = mne.report._ReportScraper()
scrapers += (report_scraper,)
del backend
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': dict(mne=None),
'examples_dirs': examples_dirs,
'subsection_order': ExplicitOrder(['../examples/io/',
'../examples/simulation/',
'../examples/preprocessing/',
'../examples/visualization/',
'../examples/time_frequency/',
'../examples/stats/',
'../examples/decoding/',
'../examples/connectivity/',
'../examples/forward/',
'../examples/inverse/',
'../examples/realtime/',
'../examples/datasets/',
'../tutorials/intro/',
'../tutorials/io/',
'../tutorials/raw/',
'../tutorials/preprocessing/',
'../tutorials/epochs/',
'../tutorials/evoked/',
'../tutorials/time-freq/',
'../tutorials/forward/',
'../tutorials/inverse/',
'../tutorials/stats-sensor-space/',
'../tutorials/stats-source-space/',
'../tutorials/machine-learning/',
'../tutorials/clinical/',
'../tutorials/simulation/',
'../tutorials/sample-datasets/',
'../tutorials/misc/']),
'gallery_dirs': gallery_dirs,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning
'thumbnail_size': (160, 112),
'remove_config_comments': True,
'min_reported_time': 1.,
'abort_on_example_error': False,
'reset_modules': ('matplotlib', Resetter()), # called w/each script
'image_scrapers': scrapers,
'show_memory': not sys.platform.startswith('win'),
'line_numbers': False, # messes with style
'within_subsection_order': FileNameSortKey,
'capture_repr': ('_repr_html_',),
'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
'matplotlib_animations': True,
'compress_images': ('images', 'thumbnails'),
'filename_pattern': '^((?!sgskip).)*$',
}
# Files were renamed from plot_* with:
# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa
def append_attr_meth_examples(app, what, name, obj, options, lines):
"""Append SG examples backreferences to method and attr docstrings."""
# NumpyDoc nicely embeds method and attribute docstrings for us, but it
# does not respect the autodoc templates that would otherwise insert
# the .. include:: lines, so we need to do it.
# Eventually this could perhaps live in SG.
if what in ('attribute', 'method'):
size = os.path.getsize(os.path.join(
os.path.dirname(__file__), 'generated', '%s.examples' % (name,)))
if size > 0:
lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
# -- Other extension configuration -------------------------------------------
linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36') # noqa: E501
linkcheck_ignore = [ # will be compiled to regex
r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full', # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
'https://doi.org/10.1002/mds.870120629', # Read timed out.
'https://doi.org/10.1088/0031-9155/32/1/004', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/40/3/001', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/51/7/008', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0967-3334/22/4/305', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/1741-2552/aacfe4', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1093/sleep/18.7.557', # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557
'https://doi.org/10.1162/089976699300016719', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242
'https://doi.org/10.1162/jocn.1993.5.2.162', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095
'https://doi.org/10.1162/neco.1995.7.6.1129', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909
'https://doi.org/10.1162/jocn_a_00405', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980
'https://doi.org/10.1167/15.6.4', # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4
'https://doi.org/10.7488/ds/1556', # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach', # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://www.nyu.edu/', # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)')))
'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442.*', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False # saves a bit of time
linkcheck_timeout = 15 # some can be quite slow
# autodoc / autosummary
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''
# -- Nitpicky ----------------------------------------------------------------
nitpicky = True
nitpick_ignore = [
("py:class", "None. Remove all items from D."),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "an object providing a view on D's values"),
("py:class", "a shallow copy of D"),
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "_FuncT"), # type hint used in @verbose decorator
("py:class", "mne.utils._logging._FuncT"),
]
for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label',
'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report',
'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate',
'VolSourceEstimate', 'VolVectorSourceEstimate',
'channels.DigMontage', 'channels.Layout',
'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator',
'decoding.GeneralizingEstimator', 'decoding.LinearModel',
'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD',
'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator',
'decoding.TemporalFilter', 'decoding.TimeDelayingRidge',
'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter',
'decoding.Vectorizer',
'preprocessing.ICA', 'preprocessing.Xdawn',
'simulation.SourceSimulator',
'time_frequency.CrossSpectralDensity',
'utils.deprecated',
'viz.ClickableImage'):
nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__'))
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'icon_links': [
dict(name='GitHub',
url='https://github.com/mne-tools/mne-python',
icon='fab fa-github-square'),
dict(name='Twitter',
url='https://twitter.com/mne_python',
icon='fab fa-twitter-square'),
dict(name='Discourse',
url='https://mne.discourse.group/',
icon='fab fa-discourse'),
dict(name='Discord',
url='https://discord.gg/rKfvxTuATa',
icon='fab fa-discord')
],
'icon_links_label': 'Quick Links', # for screen reader
'use_edit_page_button': False,
'navigation_with_keys': False,
'show_toc_level': 1,
'navbar_end': ['version-switcher', 'navbar-icon-links'],
'footer_items': ['copyright'],
'google_analytics_id': 'UA-37225609-1',
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
'contributing.html',
'documentation.html',
'getting_started.html',
'install_mne_python.html',
]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['search-field.html', 'sidebar-quicklinks.html'],
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# accommodate different logo shapes (width values in rem)
xs = '2'
sm = '2.5'
md = '3'
lg = '4.5'
xl = '5'
xxl = '6'
# variables to pass to HTML templating engine
html_context = {
'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))),
'versions_dropdown': {
'dev': 'v0.24 (devel)',
'stable': 'v0.23 (stable)',
'0.22': 'v0.22',
'0.21': 'v0.21',
'0.20': 'v0.20',
'0.19': 'v0.19',
'0.18': 'v0.18',
'0.17': 'v0.17',
'0.16': 'v0.16',
'0.15': 'v0.15',
'0.14': 'v0.14',
'0.13': 'v0.13',
'0.12': 'v0.12',
'0.11': 'v0.11',
},
'funders': [
dict(img='nih.png', size='3', title='National Institutes of Health'),
dict(img='nsf.png', size='3.5',
title='US National Science Foundation'),
dict(img='erc.svg', size='3.5', title='European Research Council'),
dict(img='doe.svg', size='3', title='US Department of Energy'),
dict(img='anr.svg', size='4.5',
title='Agence Nationale de la Recherche'),
dict(img='cds.png', size='2.25',
title='Paris-Saclay Center for Data Science'),
dict(img='google.svg', size='2.25', title='Google'),
dict(img='amazon.svg', size='2.5', title='Amazon'),
dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'),
],
'institutions': [
dict(name='Massachusetts General Hospital',
img='MGH.svg',
url='https://www.massgeneral.org/',
size=sm),
dict(name='Athinoula A. Martinos Center for Biomedical Imaging',
img='Martinos.png',
url='https://martinos.org/',
size=md),
dict(name='Harvard Medical School',
img='Harvard.png',
url='https://hms.harvard.edu/',
size=sm),
dict(name='Massachusetts Institute of Technology',
img='MIT.svg',
url='https://web.mit.edu/',
size=md),
dict(name='New York University',
img='NYU.png',
url='https://www.nyu.edu/',
size=xs),
dict(name='Commissariat à l´énergie atomique et aux énergies alternatives', # noqa E501
img='CEA.png',
url='http://www.cea.fr/',
size=md),
dict(name='Aalto-yliopiston perustieteiden korkeakoulu',
img='Aalto.svg',
url='https://sci.aalto.fi/',
size=md),
dict(name='Télécom ParisTech',
img='Telecom_Paris_Tech.svg',
url='https://www.telecom-paris.fr/',
size=md),
dict(name='University of Washington',
img='Washington.png',
url='https://www.washington.edu/',
size=md),
dict(name='Institut du Cerveau et de la Moelle épinière',
img='ICM.jpg',
url='https://icm-institute.org/',
size=md),
dict(name='Boston University',
img='BU.svg',
url='https://www.bu.edu/',
size=lg),
dict(name='Institut national de la santé et de la recherche médicale',
img='Inserm.svg',
url='https://www.inserm.fr/',
size=xl),
dict(name='Forschungszentrum Jülich',
img='Julich.svg',
url='https://www.fz-juelich.de/',
size=xl),
dict(name='Technische Universität Ilmenau',
img='Ilmenau.gif',
url='https://www.tu-ilmenau.de/',
size=xxl),
dict(name='Berkeley Institute for Data Science',
img='BIDS.png',
url='https://bids.berkeley.edu/',
size=lg),
dict(name='Institut national de recherche en informatique et en automatique', # noqa E501
img='inria.png',
url='https://www.inria.fr/',
size=xl),
dict(name='Aarhus Universitet',
img='Aarhus.png',
url='https://www.au.dk/',
size=xl),
dict(name='Karl-Franzens-Universität Graz',
img='Graz.jpg',
url='https://www.uni-graz.at/',
size=md),
dict(name='SWPS Uniwersytet Humanistycznospołeczny',
img='SWPS.svg',
url='https://www.swps.pl/',
size=xl),
dict(name='Max-Planck-Institut für Bildungsforschung',
img='MPIB.svg',
url='https://www.mpib-berlin.mpg.de/',
size=xxl),
dict(name='Macquarie University',
img='Macquarie.png',
url='https://www.mq.edu.au/',
size=lg),
dict(name='Children’s Hospital of Philadelphia Research Institute',
img='CHOP.svg',
url='https://imaging.research.chop.edu/',
size=xxl),
],
# \u00AD is an optional hyphen (not rendered unless needed)
'carousel': [
dict(title='Source Estimation',
text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.', # noqa E501
url='auto_tutorials/inverse/30_mne_dspm_loreta.html',
img='sphx_glr_30_mne_dspm_loreta_008.gif',
alt='dSPM'),
dict(title='Machine Learning',
text='Advanced decoding models including time general\u00ADiza\u00ADtion.', # noqa E501
url='auto_tutorials/machine-learning/50_decoding.html',
img='sphx_glr_50_decoding_006.png',
alt='Decoding'),
dict(title='Encoding Models',
text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.', # noqa E501
url='auto_tutorials/machine-learning/30_strf.html',
img='sphx_glr_30_strf_001.png',
alt='STRF'),
dict(title='Statistics',
text='Parametric and non-parametric, permutation tests and clustering.', # noqa E501
url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html', # noqa E501
img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png',
alt='Clusters'),
dict(title='Connectivity',
text='All-to-all spectral and effective connec\u00ADtivity measures.', # noqa E501
url='auto_examples/connectivity/mne_inverse_label_connectivity.html', # noqa E501
img='sphx_glr_mne_inverse_label_connectivity_001.png',
alt='Connectivity'),
dict(title='Data Visualization',
text='Explore your data from multiple perspectives.',
url='auto_tutorials/evoked/20_visualize_evoked.html',
img='sphx_glr_20_visualize_evoked_007.png',
alt='Visualization'),
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = []
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
_np_print_defaults = np.get_printoptions()
# -- Warnings management -----------------------------------------------------
def reset_warnings(gallery_conf, fname):
"""Ensure we are future compatible and ignore silly warnings."""
# In principle, our examples should produce no warnings.
# Here we cause warnings to become errors, with a few exceptions.
# This list should be considered alongside
# setup.cfg -> [tool:pytest] -> filterwarnings
# remove tweaks from other module imports or example runs
warnings.resetwarnings()
# restrict
warnings.filterwarnings('error')
# allow these, but show them
warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
warnings.filterwarnings('always', '.*cannot make axes width small.*')
warnings.filterwarnings('always', '.*Axes that are not compatible.*')
warnings.filterwarnings('always', '.*FastICA did not converge.*')
# ECoG BIDS spec violations:
warnings.filterwarnings('always', '.*Fiducial point nasion not found.*')
warnings.filterwarnings('always', '.*DigMontage is only a subset of.*')
warnings.filterwarnings( # xhemi morph (should probably update sample)
'always', '.*does not exist, creating it and saving it.*')
warnings.filterwarnings('default', module='sphinx') # internal warnings
warnings.filterwarnings(
'always', '.*converting a masked element to nan.*') # matplotlib?
# allow these warnings, but don't show them
warnings.filterwarnings(
'ignore', '.*OpenSSL\\.rand is deprecated.*')
warnings.filterwarnings('ignore', '.*is currently using agg.*')
warnings.filterwarnings( # SciPy-related warning (maybe 1.2.0 will fix it)
'ignore', '.*the matrix subclass is not the recommended.*')
warnings.filterwarnings( # some joblib warning
'ignore', '.*semaphore_tracker: process died unexpectedly.*')
warnings.filterwarnings( # needed until SciPy 1.2.0 is released
'ignore', '.*will be interpreted as an array index.*', module='scipy')
for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
'Using or importing the ABCs from', # internal modules on 3.7
r"it will be an error for 'np\.bool_'", # ndimage
"DocumenterBridge requires a state object", # sphinx dev
"'U' mode is deprecated", # sphinx io
r"joblib is deprecated in 0\.21", # nilearn
'The usage of `cmp` is deprecated and will', # sklearn/pytest
'scipy.* is deprecated and will be removed in', # dipy
r'Converting `np\.character` to a dtype is deprecated', # vtk
r'sphinx\.util\.smartypants is deprecated',
'is a deprecated alias for the builtin', # NumPy
'the old name will be removed', # Jinja, via sphinx
'rcParams is deprecated', # PyVista rcParams -> global_theme
'to mean no clipping',
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*%s.*" % key, category=DeprecationWarning)
warnings.filterwarnings( # deal with bootstrap-theme bug
'ignore', message=".*modify script_files in the theme.*",
category=Warning)
warnings.filterwarnings( # nilearn
'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
category=FutureWarning)
warnings.filterwarnings( # nilearn
'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
warnings.filterwarnings( # nilearn
'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning)
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
warnings.filterwarnings( # realtime
'ignore', message=".*unclosed file.*", category=ResourceWarning)
warnings.filterwarnings('ignore', message='Exception ignored in.*')
# allow this ImportWarning, but don't show it
warnings.filterwarnings(
'ignore', message="can't resolve package from", category=ImportWarning)
warnings.filterwarnings(
'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
# We use np.set_printoptions in some tutorials, but we only want it to
# affect those, so restore the defaults here:
np.set_printoptions(**_np_print_defaults)
reset_warnings(None, None)
# -- Fontawesome support -----------------------------------------------------
# here the "b" and "s" refer to "brand" and "solid" (determines which font file
# to look in). "fw-" prefix indicates fixed width.
icons = {
'apple': 'b',
'linux': 'b',
'windows': 'b',
'hand-paper': 's',
'question': 's',
'quote-left': 's',
'rocket': 's',
'server': 's',
'fw-book': 's',
'fw-code-branch': 's',
'fw-newspaper': 's',
'fw-question-circle': 's',
'fw-quote-left': 's',
}
prolog = ''
for icon, cls in icons.items():
fw = ' fa-fw' if icon.startswith('fw-') else ''
prolog += f'''
.. |{icon}| raw:: html
<i class="fa{cls} fa-{icon[3:] if fw else icon}{fw}"></i>
'''
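# For illustration only (not part of the configuration): each loop iteration adds
# one substitution to prolog, e.g. 'fw-book' (solid font, fixed width) yields
# roughly
#
#     .. |fw-book| raw:: html
#         <i class="fas fa-book fa-fw"></i>
#
# while a brand icon such as 'apple' yields <i class="fab fa-apple"></i>.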
# -- website redirects --------------------------------------------------------
# Static list created 2021/04/13 based on what we needed to redirect,
# since we don't need to add redirects for examples added after this date.
needed_plot_redirects = {
# tutorials
'10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py',
'10_preprocessing_overview.py', '10_raw_overview.py',
'10_reading_meg_data.py', '15_handling_bad_channels.py',
'20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py',
'20_rejecting_bad_data.py', '20_visualize_epochs.py',
'20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py',
'30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py',
'35_artifact_correction_regression.py', '40_artifact_correction_ica.py',
'40_autogenerate_metadata.py', '40_sensor_locations.py',
'40_visualize_raw.py', '45_projectors_background.py',
'50_artifact_correction_ssp.py', '50_configure_mne.py',
'50_epochs_to_data_frame.py', '55_setting_eeg_reference.py',
'59_head_positions.py', '60_make_fixed_length_epochs.py',
'60_maxwell_filtering_sss.py', '70_fnirs_processing.py',
# examples
'3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py',
'cluster_stats_evoked.py', 'compute_csd.py',
'compute_mne_inverse_epochs_in_label.py',
'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py',
'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py',
'custom_inverse_solver.py', 'cwt_sensor_connectivity.py',
'decoding_csp_eeg.py', 'decoding_csp_timefreq.py',
'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py',
'decoding_time_generalization_conditions.py',
'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py',
'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py',
'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py',
'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py',
'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py',
'fdr_stats_evoked.py', 'find_ref_artifacts.py',
'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py',
'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py',
'interpolate_bad_channels.py', 'label_activation_from_stc.py',
'label_from_stc.py', 'label_source_activations.py',
'left_cerebellum_volume_source.py', 'limo_data.py',
'linear_model_patterns.py', 'linear_regression_raw.py',
'meg_sensors.py', 'mixed_norm_inverse.py',
'mixed_source_space_connectivity.py', 'mixed_source_space_inverse.py',
'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py',
'mne_inverse_connectivity_spectrum.py',
'mne_inverse_envelope_correlation.py',
'mne_inverse_envelope_correlation_volume.py',
'mne_inverse_label_connectivity.py', 'mne_inverse_psi_visual.py',
'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py',
'movement_detection.py', 'multidict_reweighted_tfmxne.py',
'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py',
'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py',
'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py',
'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py',
'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py',
'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py',
'sensor_connectivity.py', 'sensor_noise_level.py',
'sensor_permutation_test.py', 'sensor_regression.py',
'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py',
'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py',
'source_label_time_frequency.py', 'source_power_spectrum.py',
'source_power_spectrum_opm.py', 'source_simulator.py',
'source_space_morphing.py', 'source_space_snr.py',
'source_space_time_frequency.py', 'ssd_spatial_filters.py',
'ssp_projs_sensitivity_map.py', 'temporal_whitening.py',
'time_frequency_erds.py', 'time_frequency_global_field_power.py',
'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py',
'topo_compare_conditions.py', 'topo_customized.py',
'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py',
'xhemi.py',
}
tu = 'auto_tutorials'
di = 'discussions'
sm = 'source-modeling'
fw = 'forward'
nv = 'inverse'
sn = 'stats-sensor-space'
sr = 'stats-source-space'
sd = 'sample-datasets'
ml = 'machine-learning'
tf = 'time-freq'
si = 'simulation'
custom_redirects = {
# Custom redirects (one HTML path to another, relative to outdir)
# can be added here as fr->to key->value mappings
f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html',
f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html',
f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html', # noqa E501
f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html',
f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html',
f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html',
f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html',
f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html',
f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html', # noqa E501
f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html', # noqa E501
f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html',
f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html',
f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html', # noqa E501
f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html', # noqa E501
f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html', # noqa E501
f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html', # noqa E501
f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html',
f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html', # noqa E501
f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html', # noqa E501
f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html',
f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html', # noqa E501
f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html',
f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html',
f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html', # noqa E501
f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html', # noqa E501
f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html',
f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html', # noqa E501
f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html',
f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html', # noqa E501
f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html', # noqa E501
f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html',
f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html', # noqa E501
f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html', # noqa E501
f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html', # noqa E501
f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html', # noqa E501
f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html', # noqa E501
f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html',
f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html', # noqa E501
f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
}
def make_redirects(app, exception):
"""Make HTML redirects."""
# https://www.sphinx-doc.org/en/master/extdev/appapi.html
# Adapted from sphinxcontrib/redirects (BSD 2-clause)
if not isinstance(app.builder, sphinx.builders.html.StandaloneHTMLBuilder):
return
logger = sphinx.util.logging.getLogger('mne')
TEMPLATE = """\
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url={to}">
<script type="text/javascript">
window.location.href = "{to}"
</script>
<title>Page Redirection</title>
</head>
<body>
If you are not redirected automatically, follow this <a href='{to}'>link</a>.
</body>
</html>""" # noqa: E501
sphinx_gallery_conf = app.config['sphinx_gallery_conf']
for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'],
sphinx_gallery_conf['gallery_dirs']):
root = os.path.abspath(os.path.join(app.srcdir, src_dir))
fnames = [os.path.join(os.path.relpath(dirpath, root), fname)
for dirpath, _, fnames in os.walk(root)
for fname in fnames
if fname in needed_plot_redirects]
# plot_ redirects
for fname in fnames:
dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname))
to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html'
fr_fname = f'plot_{to_fname}'
to_path = os.path.join(dirname, to_fname)
fr_path = os.path.join(dirname, fr_fname)
assert os.path.isfile(to_path), (fname, to_path)
with open(fr_path, 'w') as fid:
fid.write(TEMPLATE.format(to=to_fname))
logger.info(
f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}')
# custom redirects
for fr, to in custom_redirects.items():
to_path = os.path.join(app.outdir, to)
assert os.path.isfile(to_path), to
assert to_path.endswith('html'), to_path
fr_path = os.path.join(app.outdir, fr)
assert fr_path.endswith('html'), fr_path
# allow overwrite if existing file is just a redirect
if os.path.isfile(fr_path):
with open(fr_path, 'r') as fid:
for _ in range(8):
next(fid)
line = fid.readline()
assert 'Page Redirection' in line, line
# handle folders that no longer exist
if fr_path.split(os.path.sep)[-2] in (
'misc', 'discussions', 'source-modeling', 'sample-datasets'):
os.makedirs(os.path.dirname(fr_path), exist_ok=True)
# handle links to sibling folders
path_parts = to.split(os.path.sep)
path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):]
with open(fr_path, 'w') as fid:
fid.write(TEMPLATE.format(to=os.path.join(*path_parts)))
logger.info(
f'Added {len(custom_redirects):3d} HTML custom redirects')
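# Hedged, self-contained sketch (added for illustration; the build never calls
# it): it mirrors how make_redirects() above renders one custom_redirects
# entry into a small redirect page. The template here is a trimmed copy of
# TEMPLATE and the default target path is only an example value.
def _example_redirect_html(to='../evoked/30_eeg_erp.html'):
    """Return the HTML body a redirect stub would contain for `to`."""
    template = ('<html><head>'
                '<meta http-equiv="refresh" content="1; url={to}">'
                '</head><body>If you are not redirected automatically, '
                "follow this <a href='{to}'>link</a>.</body></html>")
    return template.format(to=to)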
# -- Connect our handlers to the main Sphinx app ---------------------------
def setup(app):
"""Set up the Sphinx app."""
app.connect('autodoc-process-docstring', append_attr_meth_examples)
if report_scraper is not None:
report_scraper.app = app
app.config.rst_prolog = prolog
app.connect('builder-inited', report_scraper.copyfiles)
app.connect('build-finished', make_redirects)
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
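# Hedged compatibility note (added; not part of the original example): the
# ``clf.loss_`` attribute used above was deprecated and later removed in
# recent scikit-learn releases. On such versions a per-stage test curve can
# be computed from staged probabilities instead, e.g.:
#
#     from sklearn.metrics import log_loss
#     test_loss = np.array([log_loss(y_test, proba) for proba in
#                           clf.staged_predict_proba(X_test)])
#
# which differs from the old binomial deviance only by a factor of two.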
| bsd-3-clause |
mirestrepo/voxels-at-lems | bmvc12/bof/read_min_max_accuracy.py | 1 | 2965 | from optparse import OptionParser
import numpy as np
from sklearn import preprocessing, neighbors, svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import *
import matplotlib.pyplot as plt
import matplotlib.cm as cmt
#*******************The Main Algorithm ************************#
if __name__=="__main__":
parser = OptionParser()
parser.add_option("-r", "--radius", action="store", type="int", dest="radius", help="radius (multiple of resolution)");
parser.add_option("-p", "--percent", action="store", type="int", dest="percentile", help="percentile of original samples");
parser.add_option("-d", "--descriptor", action="store", type="string", dest="descriptor_type", help="name of the descriptor i.e FPFH");
parser.add_option("-k", "--nmeans", action="store", type="int", dest="K", help="number of means");
parser.add_option("-c", "--clf_name", action="store", type="string", dest="clf_name", help="classifier name");
(opts, args) = parser.parse_args()
print(opts)
print(args)
ft=opts.descriptor_type;
radius=opts.radius;
percentile=opts.percentile;
K=opts.K;
clf_name = opts.clf_name;
trials = (0,3,4)
# trials = (0,1, 2, 3,4)
# trials = (0,1, 2, 3)
#*************************************************************************
# Read all labels and get average confusion matrix and average report
#*************************************************************************
feature_name = ft + "_" + str(radius);
#Where results will be saved
avg_classification_dir = "/Users/isa/Experiments/bof_bmvc12/" + "/average/" + feature_name + "/percentile_" + str(percentile)
avg_classification_dir = avg_classification_dir + "/classification_" + str(K);
descriptor_dir = "/Users/isa/Experiments/bof_bmvc12/" + "/trial_" + str(0) + "/" + feature_name + "/percentile_" + str(percentile)
classification_dir = descriptor_dir + "/classification_" + str(K)
prfs_file = classification_dir +'/' + clf_name + "_prf1s.txt"
fis = open(prfs_file, 'r');
prf1s = np.genfromtxt(fis)
recall = prf1s[:,1];
support = prf1s[:,3];
fis.close();
for trial in trials:
descriptor_dir = "/Users/isa/Experiments/bof_bmvc12/" + "/trial_" + str(trial) + "/" + feature_name + "/percentile_" + str(percentile)
classification_dir = descriptor_dir + "/classification_" + str(K);
prfs_file = classification_dir +'/' + clf_name + "_prf1s.txt"
fis = open(prfs_file, 'r');
prf1s = np.genfromtxt(fis)
recall = np.vstack((recall, prf1s[:,1]));
support = np.vstack((support, prf1s[:,3]));
fis.close();
#save the recall file
print "Saving Recall to:" + avg_classification_dir
recall_file = avg_classification_dir +'/' + clf_name + "_recall.txt"
fos = open(recall_file, 'w');
np.savetxt(fos,recall);
fos.close();
support_file = avg_classification_dir +'/' + clf_name + "_support.txt"
fos = open(support_file, 'w');
np.savetxt(fos,support);
fos.close(); | bsd-2-clause |
Lawrence-Liu/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
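# Added illustration (hedged, not in the original example): on two tiny inputs
# the custom kernel is simply a linear kernel that weights the first feature
# twice as heavily as the second.
_xa = np.array([[1.0, 2.0]])
_xb = np.array([[3.0, 4.0]])
assert my_kernel(_xa, _xb)[0, 0] == 2 * (1 * 3) + 1 * (2 * 4)  # == 14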
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
biorack/metatlas | metatlas/datastructures/object_helpers.py | 1 | 24584 | from __future__ import absolute_import
from __future__ import print_function
import logging
import sys
import os
import getpass
import six
import uuid
from collections import defaultdict
import functools
import dataset
import pandas as pd
import socket
import os.path
import yaml
from six.moves import input
try:
from traitlets import (
HasTraits, CUnicode, List, CInt, Instance, Enum,
CFloat, CBool)
except ImportError:
from IPython.utils.traitlets import (
HasTraits, CUnicode, List, CInt, Instance, Enum,
CFloat, CBool)
logger = logging.getLogger(__name__)
# Whether we are running from NERSC
ON_NERSC = 'METATLAS_LOCAL' not in os.environ
logger.info('NERSC=%s', ON_NERSC)
# Observable List from
# http://stackoverflow.com/a/13259435
def callback_method(func):
def notify(self, *args, **kwargs):
if not hasattr(self, '_callbacks'):
return func(self, *args, **kwargs)
for _, callback in self._callbacks:
callback()
return func(self, *args, **kwargs)
return notify
class NotifyList(list):
extend = callback_method(list.extend)
append = callback_method(list.append)
remove = callback_method(list.remove)
pop = callback_method(list.pop)
__delitem__ = callback_method(list.__delitem__)
__setitem__ = callback_method(list.__setitem__)
__iadd__ = callback_method(list.__iadd__)
__imul__ = callback_method(list.__imul__)
def __getitem__(self, item):
if isinstance(item, slice):
return self.__class__(list.__getitem__(self, item))
else:
return list.__getitem__(self, item)
def __init__(self, *args):
list.__init__(self, *args)
self._callbacks = []
self._callback_cntr = 0
def register_callback(self, cb):
self._callbacks.append((self._callback_cntr, cb))
self._callback_cntr += 1
return self._callback_cntr - 1
def unregister_callback(self, cbid):
for idx, (i, cb) in enumerate(self._callbacks):
if i == cbid:
self._callbacks.pop(idx)
return cb
else:
return None
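# Hedged usage sketch (added for illustration; nothing else in this module
# calls it): NotifyList fires every registered callback whenever the list is
# mutated, which is how MetList below marks its owner object as changed.
def _notify_list_demo():
    events = []
    nl = NotifyList([1, 2])
    cb_id = nl.register_callback(lambda: events.append('changed'))
    nl.append(3)                   # mutation -> callback fires
    nl.unregister_callback(cb_id)
    nl.append(4)                   # no callback fires after unregistering
    return events                  # ['changed']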
def set_docstring(cls):
"""Set the docstring for a MetatlasObject object"""
doc = cls.__doc__
if not doc:
doc = cls.__name__ + ' object.'
doc += '\n\nParameters\n----------\n'
for (tname, trait) in sorted(cls.class_traits().items()):
if tname.startswith('_'):
continue
descr = trait.__class__.__name__.lower()
if descr.startswith('c'):
descr = descr[1:]
elif descr == 'enum':
descr = '{' + ', '.join(trait.values) + '}'
doc += '%s: %s\n' % (tname, descr)
help_text = trait.help#get_metadata('help')
if not help_text:
help_text = '%s value.' % tname
help_text = help_text.strip()
if help_text.endswith('.'):
help_text = help_text[:-1]
if trait.metadata.get('readonly', False):
help_text += ' (read only)'
help_text += '.'
doc += ' %s\n' % help_text
cls.__doc__ = doc
return cls
def _get_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in _get_subclasses(s)]
class Workspace(object):
instance = None
def __init__(self):
# get metatlas directory since notebooks and scripts could be launched
# from other locations
# this directory contains the config files
metatlas_dir = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
if ON_NERSC:
with open(os.path.join(metatlas_dir, 'nersc_config', 'nersc.yml')) as fid:
nersc_info = yaml.safe_load(fid)
with open(nersc_info['db_passwd_file']) as fid:
pw = fid.read().strip()
self.path = 'mysql+pymysql://meta_atlas_admin:%[email protected]/%s' % (pw, nersc_info['db_name'])
else:
local_config_file = os.path.join(metatlas_dir, 'local_config', 'local.yml')
if os.path.isfile(local_config_file):
with open(local_config_file) as fid:
local_info = yaml.safe_load(fid)
hostname = 'localhost' if 'db_hostname' not in local_info else local_info['db_hostname']
login = ''
if 'db_username' in local_info:
if 'db_password' in local_info:
login = f"{local_info['db_username']}:{local_info['db_password']}@"
else:
login = f"{local_info['db_username']}@"
self.path = f"mysql+pymysql://{login}{hostname}/{local_info['db_name']}"
else:
filename = f"{getpass.getuser()}_workspace.db"
self.path = f"sqlite:///{filename}"
if os.path.exists(filename):
os.chmod(filename, 0o775)
logging.debug('Using database at: %s', self.path)
self.tablename_lut = dict()
self.subclass_lut = dict()
from .metatlas_objects import MetatlasObject
for klass in _get_subclasses(MetatlasObject):
name = klass.__name__.lower()
self.subclass_lut[name] = klass
if name.endswith('s'):
self.subclass_lut[name + 'es'] = klass
self.tablename_lut[klass] = name + 'es'
else:
self.subclass_lut[name + 's'] = klass
self.tablename_lut[klass] = name + 's'
# handle circular references
self.seen = dict()
Workspace.instance = self
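    # Hedged example (added; the file below is assumed, it does not ship with
    # the package): a minimal local_config/local.yml that the branch above
    # would turn into 'mysql+pymysql://user:pw@localhost/meta_atlas':
    #
    #   db_name: meta_atlas
    #   db_username: user
    #   db_password: pw
    #   # db_hostname defaults to localhost when omitted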
@classmethod
def get_instance(cls):
if Workspace.instance is None:
return Workspace()
return Workspace.instance
def get_connection(self):
"""
Get a re-useable connection to the database.
Each activity that queries the database needs to have this function preceding it.
"""
try:
if self.db.engine.name == 'mysql':
self.db.query('show tables')
else:
self.db.query('SELECT name FROM sqlite_master WHERE type = "table"')
except Exception:
self.db = dataset.connect(self.path)
def close_connection(self):
self.db.close()
self.db = None
def convert_to_double(self, table, entry):
"""Convert a table column to double type."""
self.get_connection()
self.db.begin()
try:
self.db.query('alter table `%s` modify `%s` double' % (table, entry))
self.db.commit()
except Exception as e:
self.db.rollback()
print(e)
logging.error('Transaction rollback within convert_to_double()')
def save_objects(self, objects, _override=False):
"""Save objects to the database"""
logging.debug('Entering Workspace.save_objects')
if not isinstance(objects, (list, set)):
objects = [objects]
self._seen = dict()
self._link_updates = defaultdict(list)
self._updates = defaultdict(list)
self._inserts = defaultdict(list)
for obj in objects:
self._get_save_data(obj, _override)
logging.debug('Workspace._inserts=%s', self._inserts)
self.get_connection()
self.db.begin()
try:
for (table_name, updates) in self._link_updates.items():
if table_name not in self.db:
continue
for (uid, prev_uid) in updates:
self.db.query('update `%s` set source_id = "%s" where source_id = "%s"' %
(table_name, prev_uid, uid))
for (table_name, updates) in self._updates.items():
if '_' not in table_name and table_name not in self.db:
self.db.create_table(table_name, primary_id='unique_id',
primary_type=self.db.types.string(32))
if 'sqlite' not in self.path:
self.fix_table(table_name)
for (uid, prev_uid) in updates:
self.db.query('update `%s` set unique_id = "%s" where unique_id = "%s"' %
(table_name, prev_uid, uid))
for (table_name, inserts) in self._inserts.items():
if '_' not in table_name and table_name not in self.db:
self.db.create_table(table_name, primary_id='unique_id',
primary_type=self.db.types.string(32))
if 'sqlite' not in self.path:
self.fix_table(table_name)
self.db[table_name].insert_many(inserts)
logging.debug('inserting %s', inserts)
self.db.commit()
except Exception:
self.db.rollback()
logging.error('Transaction rollback within save_objects()')
def create_link_tables(self, klass):
"""
Create a link table in the database of the given trait klass
"""
name = self.tablename_lut[klass]
self.get_connection()
self.db.begin()
try:
for (tname, trait) in klass.class_traits().items():
if isinstance(trait, MetList):
table_name = '_'.join([name, tname])
if table_name not in self.db:
self.db.create_table(table_name)
link = dict(source_id=uuid.uuid4().hex,
head_id=uuid.uuid4().hex,
target_id=uuid.uuid4().hex,
target_table=uuid.uuid4().hex)
self.db[table_name].insert(link)
self.db.commit()
except Exception:
self.db.rollback()
logging.error('Transaction rollback within create_link_tables()')
def _get_save_data(self, obj, override=False):
"""Get the data that will be used to save an object to the database"""
if obj.unique_id in self._seen:
return
if isinstance(obj, Stub):
return
name = self.tablename_lut[obj.__class__]
self._seen[obj.unique_id] = True
changed, prev_uid = obj._update(override)
state = dict()
for (tname, trait) in obj.traits().items():
if tname.startswith('_'):
continue
if isinstance(trait, List):
# handle a list of objects by using a Link table
# create the link table if necessary
table_name = '_'.join([name, tname])
if changed and prev_uid:
self._link_updates[table_name].append((obj.unique_id,
obj.prev_uid))
value = getattr(obj, tname)
# do not store this entry in our own table
if not value:
continue
# create an entry in the table for each item
# store the item in its own table
for subvalue in value:
self._get_save_data(subvalue, override)
link = dict(source_id=obj.unique_id,
head_id=obj.head_id,
target_id=subvalue.unique_id,
target_table=subvalue.__class__.__name__.lower() + 's')
if changed:
self._inserts[table_name].append(link)
elif isinstance(trait, MetInstance):
value = getattr(obj, tname)
# handle a sub-object
# if it is not assigned, use an empty unique_id
if value is None:
state[tname] = ''
# otherwise, store the uid and allow the object to store
# itself
else:
state[tname] = value.unique_id
self._get_save_data(value, override)
elif changed:
value = getattr(obj, tname)
# store the raw value in this table
state[tname] = value
if prev_uid and changed:
self._updates[name].append((obj.unique_id, obj.prev_uid))
else:
state['prev_uid'] = ''
if changed:
self._inserts[name].append(state)
def fix_table(self, table_name):
"""Fix a table by converting floating point values to doubles"""
klass = self.subclass_lut.get(table_name, None)
if not klass:
return
table_name = self.tablename_lut[klass]
for (tname, trait) in klass.class_traits().items():
if isinstance(trait, MetFloat):
self.convert_to_double(table_name, tname)
def retrieve(self, object_type, **kwargs):
"""Retrieve an object from the database."""
object_type = object_type.lower()
klass = self.subclass_lut.get(object_type, None)
items = []
self.get_connection()
self.db.begin()
try:
if object_type not in self.db:
if not klass:
raise ValueError('Unknown object type: %s' % object_type)
object_type = self.tablename_lut[klass]
if '_' not in object_type:
if kwargs.get('username', '') in ['*', 'all']:
kwargs.pop('username')
else:
kwargs.setdefault('username', getpass.getuser())
# Example query if group id is given
# SELECT *
# FROM tablename
# WHERE (city = 'New York' AND name like 'IBM%')
# Example query where unique id and group id are not given
# (to avoid getting all versions of the same object)
# http://stackoverflow.com/a/12102288
# SELECT *
# from (SELECT * from `groups`
# WHERE (name='spam') ORDER BY last_modified)
# x GROUP BY head_id
query = 'select * from `%s` where (' % object_type
clauses = []
for (key, value) in kwargs.items():
if type(value) is list and len(value)>0:
clauses.append('%s in ("%s")' % (key, '", "'.join(value)))
elif not isinstance(value, six.string_types):
clauses.append("%s = %s" % (key, value))
elif '%%' in value:
clauses.append('%s = "%s"' % (key, value.replace('%%', '%')))
elif '%' in value:
clauses.append('%s like "%s"' % (key, value.replace('*', '%')))
else:
clauses.append('%s = "%s"' % (key, value))
query += ' and '.join(clauses) + ')'
if not clauses:
query = query.replace(' where ()', '')
try:
items = list(self.db.query(query))
except Exception as e:
if 'Unknown column' in str(e):
keys = [k for k in klass.class_traits().keys()
if not k.startswith('_')]
raise ValueError('Invalid column name, valid columns: %s' % keys)
else:
raise(e)
items = [klass(**i) for i in items]
uids = [i.unique_id for i in items]
if not items:
return []
# get stubs for each of the list items
for (tname, trait) in items[0].traits().items():
if isinstance(trait, List):
table_name = '_'.join([object_type, tname])
if table_name not in self.db:
for i in items:
setattr(i, tname, [])
continue
querystr = 'select * from `%s` where source_id in ("' % table_name
querystr += '" , "'.join(uids)
result = self.db.query(querystr + '")')
sublist = defaultdict(list)
for r in result:
stub = Stub(unique_id=r['target_id'],
object_type=r['target_table'])
sublist[r['source_id']].append(stub)
for i in items:
setattr(i, tname, sublist[i.unique_id])
elif isinstance(trait, MetInstance):
pass
for i in items:
if not i.prev_uid:
i.prev_uid = 'origin'
i._changed = False
items.sort(key=lambda x: x.last_modified)
self.db.commit()
except Exception:
self.db.rollback()
logging.error('Transaction rollback within retrieve()')
return items
def remove(self, object_type, **kwargs):
"""Remove an object from the database"""
override = kwargs.pop('_override', False)
if not override:
msg = 'Are you sure you want to delete the entries? (Y/N)'
ans = input(msg)
if not ans[0].lower().startswith('y'):
print('Aborting')
return
object_type = object_type.lower()
klass = self.subclass_lut.get(object_type, None)
if not klass:
raise ValueError('Unknown object type: %s' % object_type)
object_type = self.tablename_lut[klass]
kwargs.setdefault('username', getpass.getuser())
# Example query:
# DELETE *
# FROM tablename
# WHERE (city = 'New York' AND name like 'IBM%')
query = 'delete from `%s` where (' % object_type
clauses = []
for (key, value) in kwargs.items():
if not isinstance(value, six.string_types):
clauses.append("%s = %s" % (key, value))
continue
if '%%' in value:
clauses.append('%s = "%s"' % (key, value.replace('%%', '%')))
elif '%' in value:
clauses.append('%s like "%s"' % (key, value.replace('*', '%')))
else:
clauses.append('%s = "%s"' % (key, value))
query += ' and '.join(clauses)
query += ')'
if not clauses:
query = query.replace(' where ()', '')
self.get_connection()
self.db.begin()
try:
# check for lists items that need removal
if any([isinstance(i, MetList) for i in klass.class_traits().values()]):
uid_query = query.replace('delete ', 'select unique_id ')
uids = [i['unique_id'] for i in self.db.query(uid_query)]
sub_query = 'delete from `%s` where source_id in ("%s")'
for (tname, trait) in klass.class_traits().items():
table_name = '%s_%s' % (object_type, tname)
if not uids or table_name not in self.db:
continue
if isinstance(trait, MetList):
table_query = sub_query % (table_name, '", "'.join(uids))
try:
self.db.query(table_query)
except Exception as e:
print(e)
try:
self.db.query(query)
except Exception as e:
if 'Unknown column' in str(e):
keys = [k for k in klass.class_traits().keys()
if not k.startswith('_')]
raise ValueError('Invalid column name, valid columns: %s' % keys)
else:
raise e
print('Removed')
self.db.commit()
except Exception:
self.db.rollback()
logging.error('Transaction rollback within retrieve()')
def remove_objects(self, objects, all_versions=True, **kwargs):
"""Remove a list of objects from the database."""
if not isinstance(objects, (list, set)):
objects = [objects]
if not objects:
print('No objects selected')
return
override = kwargs.pop('_override', False)
if not override:
msg = ('Are you sure you want to delete the %s object(s)? (Y/N)'
% len(objects))
ans = input(msg)
if not ans[0].lower().startswith('y'):
print('Aborting')
return
ids = defaultdict(list)
username = getpass.getuser()
attr = 'head_id' if all_versions else 'unique_id'
self.get_connection()
self.db.begin()
try:
for obj in objects:
if not override and obj.username != username:
continue
name = self.tablename_lut[obj.__class__]
ids[name].append(getattr(obj, attr))
# remove list items as well
for (tname, trait) in obj.traits().items():
if isinstance(trait, MetList):
subname = '%s_%s' % (name, tname)
ids[subname].append(getattr(obj, attr))
for (table_name, uids) in ids.items():
if table_name not in self.db:
continue
query = 'delete from `%s` where %s in ("'
query = query % (table_name, attr)
query += '" , "'.join(uids)
query += '")'
self.db.query(query)
print(('Removed %s object(s)' % len(objects)))
self.db.commit()
except Exception:
self.db.rollback()
logging.error('Transaction rollback within remove_objects()')
def format_timestamp(tstamp):
"""Get a formatted representation of a timestamp."""
try:
ts = pd.Timestamp.fromtimestamp(int(tstamp))
return ts.isoformat()
except Exception:
return str(tstamp)
class MetList(List):
allow_none = True
def validate(self, obj, value):
# value = super(MetList, self).validate(obj, value)
value = super().validate(obj, value)
value = NotifyList(value)
#value.register_callback(lambda: setattr(obj, '_changed', True))
callback = functools.partial(setattr, obj, '_changed', True)
value.register_callback(callback)
return value
class MetUnicode(CUnicode):
allow_none = True
class MetFloat(CFloat):
allow_none = True
class MetInt(CInt):
allow_none = True
class MetBool(CBool):
allow_none = True
class Stub(HasTraits):
unique_id = MetUnicode()
object_type = MetUnicode()
def retrieve(self):
return Workspace.instance.retrieve(self.object_type, username='*',
unique_id=self.unique_id)[0]
def __repr__(self):
return '%s %s' % (self.object_type.capitalize(),
self.unique_id)
def __str__(self):
return str(self.unique_id)
class MetInstance(Instance):
allow_none = True
def validate(self, obj, value):
if isinstance(value, (self.klass, Stub)):
return value
elif isinstance(value, six.string_types):
if value:
return Stub(unique_id=value,
object_type=self.klass.__name__)
else:
return None
else:
self.error(obj, value)
class MetEnum(Enum):
allow_none = True
def get_from_nersc(user, relative_path):
"""Load a remote data file from NERSC to an H5 file
Parameters
----------
user : str
NERSC user account
relative_path : str
Path to file from "/project/projectdirs/metatlas/original_data/<user>/"
"""
import pexpect
from IPython.display import clear_output
cmd = 'scp -o StrictHostKeyChecking=no '
path = "/project/projectdirs/metatlas/original_data/%s/%s"
path = path % (user, relative_path)
cmd += '%[email protected]:%s . && echo "Download Complete"'
cmd = cmd % (user, path)
print(cmd)
proc = pexpect.spawn(cmd)
proc.expect("assword:*")
passwd = input()
clear_output()
proc.send(passwd)
proc.send('\r')
proc.expect('Download Complete')
proc.close()
return os.path.abspath(os.path.basename(relative_path))
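# Hedged usage sketch (added; the user name and relative path are
# placeholders):
#   local_h5 = get_from_nersc('jdoe', '2016_01_run/some_file.h5')
# The call prompts once for the NERSC password and copies the file into the
# current working directory via scp.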
| bsd-3-clause |
bbfamily/abu | abupy/TLineBu/ABuTLine.py | 1 | 32261 | # -*- encoding:utf-8 -*-
"""
技术线对象,对外执行,输出模块
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import logging
import math
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 compatibility
    from collections import Iterable
from enum import Enum
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
from ..TLineBu.ABuTLExecute import shift_distance, bfgs_min_pos, support_resistance_pos, \
select_k_support_resistance, plot_support_resistance_trend, support_resistance_predict, regress_trend_channel, \
below_above_gen, find_percent_point, find_golden_point_ex, find_golden_point, skeleton_how
from ..CoreBu import ABuEnv
from ..CoreBu.ABuBase import FreezeAttrMixin
from ..UtilBu import ABuRegUtil
from ..UtilBu import ABuStatsUtil
from ..UtilBu.ABuDTUtil import arr_to_numpy
from ..UtilBu.ABuLazyUtil import LazyFunc
from ..UtilBu.ABuDTUtil import plt_show
__author__ = '阿布'
__weixin__ = 'abu_quant'
"""模块打印根据环境选择logging.info或者print函数"""
log_func = logging.info if ABuEnv.g_is_ipython else print
"""预备颜色序列集,超出序列数量应使用itertools.cycle循环绘制"""
K_PLT_MAP_STYLE = [
'r', 'g', 'c', 'k', 'm', 'r', 'y']
class ESkeletonHow(Enum):
"""计算骨架走势使用的how"""
"""使用最小值取骨架点位"""
skeleton_min = 0
"""使用最大值取骨架点位"""
skeleton_max = 1
"""使用平均值取骨架点位"""
skeleton_mean = 2
"""使用中位数取骨架点位"""
skeleton_median = 3
"""使用时间序列最后的元素取骨架点位"""
skeleton_close = 4
"""
使用三角模式,即最高,最低,第三点:
确定取最大值,最小值,第三个点位how_func提供,
如果np.argmax(arr) > np.argmin(arr)即最大值位置在最小值前面,
第三点取序列起点,否则取序列终点
"""
skeleton_triangle = 100
class EShiftDistanceHow(Enum):
"""计算位移路程比的how"""
"""
使用时间序列最后的元素做为路程的计算基础:
对应序列的最后一个点位值,标准路程点位值定义
"""
shift_distance_close = 0
"""
使用极限值做为路程的计算基础:
如果p_arr[0] > p_arr[-1],使用np.min(p_arr),否则np.max(p_arr),即上升趋势取max,下跌趋势取min
"""
shift_distance_maxmin = 1
"""
使用序列的sum和极限值为路程的计算基础:
如果abs(p_arr.max() - p_arr[-1]) > abs(p_arr[-1] - p_arr.min()) 取np.min(p_arr)否则np.max(p_arr)
"""
shift_distance_sum_maxmin = 2
"""step_x_to_step函数中序列步长的常数单元值"""
g_step_unit = 10
class AbuTLine(FreezeAttrMixin):
"""技术线封装执行对外操作的对象类"""
def __init__(self, line, line_name, **kwargs):
"""
:param line: iterable sequence of the technical line; internally converted to numpy via arr_to_numpy
:param line_name: name of the technical line, str object
:param kwargs mean: mean can optionally be set through kwargs, otherwise line.mean() is used
:param kwargs std: std can optionally be set through kwargs, otherwise line.std() is used
:param kwargs high: high can optionally be set through kwargs, otherwise self.mean + self.std is used
:param kwargs low: low can optionally be set through kwargs, otherwise self.mean - self.std is used
:param kwargs close: close can optionally be set through kwargs, otherwise line[-1] is used
"""
# Fill NaNs in the sequence; ideally the caller should fill NaNs according to its own data logic before passing the line in, here we can only bfill everything
line = pd.Series(line).fillna(method='bfill')
self.tl = arr_to_numpy(line)
self.mean = kwargs.pop('mean', self.tl.mean())
self.std = kwargs.pop('std', self.tl.std())
self.high = kwargs.pop('high', self.mean + self.std)
self.low = kwargs.pop('low', self.mean - self.std)
self.close = kwargs.pop('close', self.tl[-1])
self.x = np.arange(0, self.tl.shape[0])
self.line_name = line_name
for k, v in kwargs.items():
# anything else that needs to be set must come in through kwargs, since nothing can be set after _freeze
setattr(self, k, v)
# finalize the object: once initialized it must not change
self._freeze()
@classmethod
def show_kl_pd(cls, kl_pd, key='close', show=True, **kwargs):
"""
Class method: runs technical-line analysis on a data column of a financial time series and
visualizes the best fitting degree, shift/distance ratio, fitted channel curves,
skeleton channel, resistance and support levels, etc.; returns an AbuTLine object
:param kl_pd: financial time series, pd.DataFrame object
:param key: name of the column in kl_pd used as the technical line, str object
:param show: whether to visualize: best fitting degree, shift/distance ratio, fitted channel curves, skeleton channel, resistance and support levels, etc.
:param kwargs: other parameters used by the visualization functions, eg: step_x, only_last
:return: an AbuTLine object
"""
line = cls(kl_pd[key], key)
if show:
# show-related parameters can be passed through kwargs, pop them out first
zoom = kwargs.pop('zoom', False)
step_x = kwargs.pop('step_x', 1.0)
how = kwargs.pop('how', EShiftDistanceHow.shift_distance_close)
only_last = kwargs.pop('only_last', False)
line.show()
# visualize the best polynomial fitting degree of the technical line
line.show_best_poly(zoom=zoom)
# visualize the technical line's 'shift / distance ratio'
line.show_shift_distance(step_x=step_x, how=how)
# visualize the fitted curve and the upper/lower fitted channel curves
line.show_regress_trend_channel(step_x=step_x)
# visualize the skeleton channel of the technical line
line.show_skeleton_channel(step_x=step_x)
# visualize the percentage-split regions of the technical line
line.show_percents()
# visualize the golden-section levels of the technical line
line.show_golden()
# plot the resistance and support levels of the technical line, including all intermediate steps
line.show_support_resistance_trend(only_last=only_last)
return line
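    # Hedged usage sketch (added; kl_pd stands for any OHLC DataFrame that was
    # loaded elsewhere, e.g. by abupy's market data helpers):
    #
    #   line = AbuTLine.show_kl_pd(kl_pd, key='close', show=True, step_x=2.0)
    #   line.score  # position of the latest close inside the low/high band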
@LazyFunc
def score(self):
"""
Decorated by LazyFunc:
score represents where the current technical-line value sits within its range, (self.close - self.low) / (self.high - self.low)
eg:
self.high = 100, self.low = 0, self.close = 80
-> (self.close - self.low) / (self.high - self.low) = 0.8
i.e. the current value sits at the 0.8 position of the whole range
:return: current score of the technical line, a value between 0 and 1
"""
if self.high == self.low:
score = 0.8 if self.close > self.low else 0.2
else:
score = (self.close - self.low) / (self.high - self.low)
return score
@LazyFunc
def y_zoom(self):
"""
Decorated by LazyFunc:
get y_zoom, the technical line tl rescaled to the range of self.x
:return: the rescaled sequence y_zoom
"""
zoom_factor = self.x.max() / self.tl.max()
y_zoom = zoom_factor * self.tl
return y_zoom
def step_x_to_step(self, step_x):
"""
Step selection function for the time range of the technical line; used to control the step size in
show_shift_distance, show_regress_trend_channel, show_skeleton_channel and other functions that involve a time step
:param step_x: time-step control parameter, float
:return: the final output is clamped to the range 2 to len(self.tl), int
"""
if step_x <= 0:
# normalize an invalid step_x into the valid range
log_func('input step_x={} is invalid, change to step_x=1'.format(step_x))
step_x = 1
# adjust the value of g_step_unit if a finer granularity is needed
step = int(math.floor(len(self.tl) / g_step_unit / step_x))
# the output is clamped to the range 2 to len(self.tl)
step = len(self.tl) if step > len(self.tl) else step
step = 2 if step < 2 else step
return step
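    # Worked example (added for clarity): with len(self.tl) == 504 bars,
    # g_step_unit == 10 and step_x == 1.0 the step is floor(504 / 10 / 1.0) = 50;
    # step_x == 2.0 halves it to 25, and the result is always clamped to the
    # range [2, len(self.tl)].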
def show(self):
"""可视化技术线最基本的信息,high,mean,low"""
plt.subplots(figsize=ABuEnv.g_plt_figsize)
# tl holds the technical line itself
plt.plot(self.tl)
plt.axhline(self.high, color='c')
plt.axhline(self.mean, color='r')
plt.axhline(self.low, color='g')
_ = plt.setp(plt.gca().get_xticklabels(), rotation=30)
plt.legend(['TLine', 'high', 'mean', 'low'],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title(self.line_name)
plt.show()
def is_up_trend(self, up_deg_threshold=5, show=True):
"""
Decide whether the trend qualifies as an up trend:
1. check whether the trend can be described by a degree-1 (linear) fit
2. if it can, compute the angle of the linear fit
3. if the linear-fit angle >= up_deg_threshold, judge it an up trend
:param up_deg_threshold: threshold angle of the linear fit for judging an up trend, default 5
:param show: whether to show the views of the decision process
:return: whether it is an up trend
"""
valid = ABuRegUtil.valid_poly(self.tl, poly=1, show=show)
if valid:
deg = ABuRegUtil.calc_regress_deg(self.tl, show=show)
if deg >= up_deg_threshold:
return True
return False
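    # Hedged usage sketch (added; `line` is an AbuTLine built from any close
    # series you already have):
    #
    #   if line.is_up_trend(up_deg_threshold=5, show=False):
    #       log_func('the series fits a one-degree up trend')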
def is_down_trend(self, down_deg_threshold=-5, show=True):
"""
Decide whether the trend qualifies as a down trend:
1. check whether the trend can be described by a degree-1 (linear) fit
2. if it can, compute the angle of the linear fit
3. if the linear-fit angle <= down_deg_threshold, judge it a down trend
:param down_deg_threshold: threshold angle of the linear fit for judging a down trend, default -5
:param show: whether to show the views of the decision process
:return: whether it is a down trend
"""
valid = ABuRegUtil.valid_poly(self.tl, poly=1, show=show)
# logging.debug('is_down_trend valid:{}'.format(valid))
if valid:
deg = ABuRegUtil.calc_regress_deg(self.tl, show=show)
if deg <= down_deg_threshold:
return True
return False
def show_best_poly(self, zoom=False, show=True):
"""
Visualize the best polynomial fitting degree of the technical line: search for the degree poly (1-100) whose regression trend curve describes the original curve y reasonably well,
see ABuRegUtil.search_best_poly for details
:param zoom: passed through to search_best_poly, whether to rescale x, y
:param show: whether to visualize
"""
best = ABuRegUtil.search_best_poly(self.tl, zoom=zoom, show=show)
if show:
log_func('best poly = {}, zoom={}'.format(best, zoom))
return best
def show_least_valid_poly(self, zoom=False, show=True):
"""
Visualize the technical line: detect the minimum fitting degree poly whose fitted curve can represent the trend of the original curve y,
see ABuRegUtil.least_valid_poly for details
:param zoom: passed through to least_valid_poly, whether to rescale x, y
:param show: whether to visualize
"""
least = ABuRegUtil.least_valid_poly(self.tl, zoom=zoom, show=show)
if show:
log_func('least poly = {}, zoom={}'.format(least, zoom))
return least
def show_shift_distance(self, how=EShiftDistanceHow.shift_distance_close, step_x=1.0, show=True, show_log=True):
"""
Visualize the technical line's 'shift / distance ratio'. Note that the default shift_distance_close corresponds to the standard definition of the distance end point;
the other options compute the distance end point in ways that do not yield the most accurate 'shift / distance ratio', see ABuTLExecute.shift_distance for the implementation
:param how: EShiftDistanceHow object or a callable, i.e. the method used to compute the distance end point can be customized externally
:param step_x: time-step control parameter, default 1.0, float
:param show: whether to visualize
:param show_log: whether to log the shift/distance ratio of each segment, default True
:return a sequence with the shift_distance result of every slice of the financial series, i.e. every element is a tuple of:
h_distance (length of the triangle base), v_distance (vertical distance of the triangle),
distance (hypotenuse, i.e. the path length), shift (displacement), sd (shift / distance ratio: shift / distance)
"""
# the rescaled y is used here so that the shift/distance ratio is more broadly applicable
y = self.y_zoom
step = self.step_x_to_step(step_x)
if show:
plt.figure(figsize=ABuEnv.g_plt_figsize)
plt.plot(y)
shift_distance_list = []
for slice_end, color in zip(np.arange(step, len(y), step), itertools.cycle(K_PLT_MAP_STYLE)):
slice_start = slice_end - step
shift_distance_list.append(
shift_distance(y, how, slice_start=slice_start, slice_end=slice_end,
color=color, show=show, show_log=show_log, ps=False))
if show:
plt.show()
return shift_distance_list
def show_regress_trend_channel(self, step_x=1.0):
"""
Visualize the fitted curve of the technical line and the upper/lower fitted channel curves; the three fitted curves form the regression channel
:param step_x: time-step control parameter, default 1.0, float
"""
y = self.tl
step = self.step_x_to_step(step_x)
with plt_show():
plt.plot(y)
for slice_end, color in zip(np.arange(step, len(y), step), itertools.cycle(K_PLT_MAP_STYLE)):
slice_start = slice_end - step
slice_arr = y[slice_start:slice_end]
# use regress_trend_channel to get the lower, middle and upper fitted curve values of the slice
y_below, y_fit, y_above = regress_trend_channel(slice_arr)
x = self.x[slice_start:slice_end]
plt.plot(x, y_below, 'g')
plt.plot(x, y_fit, 'y')
plt.plot(x, y_above, 'r')
def show_skeleton_channel(self, with_mean=True, step_x=1.0):
"""
Wraps show_skeleton to visualize the skeleton channel of the technical line; the channel consists of:
ESkeletonHow.skeleton_min: lower channel,
ESkeletonHow.skeleton_max: upper channel,
ESkeletonHow.skeleton_mean: middle channel
:param with_mean: whether to draw the ESkeletonHow.skeleton_mean middle channel, default True
:param step_x: time-step control parameter, default 1.0, float
"""
plt.figure(figsize=ABuEnv.g_plt_figsize)
self.show_skeleton(how=ESkeletonHow.skeleton_min, step_x=step_x, ps=False)
self.show_skeleton(how=ESkeletonHow.skeleton_max, step_x=step_x, ps=False)
if with_mean:
self.show_skeleton(how=ESkeletonHow.skeleton_mean, step_x=step_x, ps=False)
# the previous draws all used ps=False, show everything together here
plt.plot(self.tl)
def show_skeleton(self, how=ESkeletonHow.skeleton_min, step_x=1.0, ps=True):
"""
Visualize the skeleton structure of the technical line
:param how: method used to compute the skeleton points of the data sequence, an ESkeletonHow object or a callable,
i.e. the method used to compute the skeleton points can be customized externally
:param step_x: time-step control parameter, default 1.0, float
:param ps: whether to execute plt.show() immediately
"""
step = self.step_x_to_step(step_x)
# first draw a point for each unit; two points joined by a straight line form the price skeleton plot
last_pos = None
# map how to the method that computes the skeleton points of the data sequence
how_func = skeleton_how(how)
if ps:
plt.figure(figsize=ABuEnv.g_plt_figsize)
for slice_end, color in zip(np.arange(step, len(self.tl), step), itertools.cycle(K_PLT_MAP_STYLE)):
slice_start = slice_end - step
slice_arr = self.tl[slice_start:slice_end]
if how == ESkeletonHow.skeleton_triangle:
"""
三角模式骨架点位:确定取最大值,最小值,第三个点位how_func提供
如果np.argmax(arr) > np.argmin(arr)即最大值位置在最小值前面,第三点取序列起点,否则取序列终点
"""
max_pos = (np.argmax(slice_arr) + slice_start, np.max(slice_arr))
min_pos = (np.argmin(slice_arr) + slice_start, np.min(slice_arr))
draw_pos = how_func(slice_arr, slice_start)
plt.plot([draw_pos[0], min_pos[0]],
[draw_pos[1], min_pos[1]], c=color)
plt.plot([draw_pos[0], max_pos[0]],
[draw_pos[1], max_pos[1]], c=color)
plt.plot([min_pos[0], max_pos[0]],
[min_pos[1], max_pos[1]], c=color)
else:
# other skeleton point calculation methods
draw_pos = (slice_start, how_func(slice_arr))
if last_pos is not None:
# connect each pair of neighbouring skeleton points, two points joined into one straight line
plt.plot([last_pos[0], draw_pos[0]],
[last_pos[1], draw_pos[1]], 'o-')
# assign the skeleton point of this step unit to last_pos
last_pos = draw_pos
if ps:
plt.plot(self.tl)
def show_skeleton_bfgs(self, step_x=1.0):
"""
Visualize the skeleton structure of the technical line; unlike show_skeleton, the skeleton point values are determined by bfgs, i.e. an
interpolation model is built with scipy.interpolate.interp1d and the skeleton point values are computed with sco.fmin_bfgs
:param step_x: time-step control parameter, default 1.0, float
"""
step = self.step_x_to_step(step_x)
# build the interpolation model with scipy.interpolate.interp1d
linear_interp = interp1d(self.x, self.tl)
# first draw a point for each unit; two points joined by a straight line form the price skeleton plot
last_pos = None
with plt_show():
# find a local minimum once per step units
for find_min_pos in np.arange(step, len(self.tl), step):
# compute the skeleton point value with sco.fmin_bfgs
local_min_pos = int(bfgs_min_pos(find_min_pos, linear_interp, len(self.tl)))
if local_min_pos == -1:
# mainly relying on the not-found case here for filtering
continue
# build the (x, y) position info of the minimum point
draw_pos = (local_min_pos, self.tl[local_min_pos])
# last_pos is None for the first step unit, and has a value afterwards
if last_pos is not None:
# connect each pair of neighbouring local minima, two points joined into one straight line
plt.plot([last_pos[0], draw_pos[0]],
[last_pos[1], draw_pos[1]], 'o-')
# assign the minimum point of this step unit to last_pos
last_pos = draw_pos
def show_support_resistance_pos(self, best_poly=0, show=True):
"""
Visualize and analyse the resistance and support levels of the technical line; support points are found with sco.fmin_bfgs, and resistance points are also found with sco.fmin_bfgs,
but the sequence passed in for resistance must already be the demeaned and negated sequence
eg:
demean_y = ABuStatsUtil.demean(self.tl): first remove the mean from the sequence with demean
resistance_y = demean_y * -1 : the resistance sequence must be negated
support_y = demean_y : the support sequence does not need to be negated
:param best_poly: the caller may set best_poly; when set, ABuRegUtil.search_best_poly is not used to search for it,
see ABuTLExecute.support_resistance_pos for details
:param show: whether to visualize
:return: (technical-line support positions: support_pos, technical-line resistance positions: resistance_pos)
"""
# first remove the mean from the sequence with demean
demean_y = ABuStatsUtil.demean(self.tl)
# the resistance sequence must be negated
resistance_y = demean_y * -1
# the support sequence does not need to be negated
support_y = demean_y
# analyse the support positions of the technical line
support_pos = support_resistance_pos(self.x, support_y, best_poly=best_poly, label='support pos')
# analyse the resistance positions of the technical line
resistance_pos = support_resistance_pos(self.x, resistance_y, best_poly=best_poly,
label='resistance pos')
if show:
plt.plot(self.x, self.tl, '--', support_pos, self.tl[support_pos], 'o',
resistance_pos, self.tl[resistance_pos], 'p')
plt.show()
# return (technical-line support positions: support_pos, technical-line resistance positions: resistance_pos)
return support_pos, resistance_pos
def show_support_resistance_select_k(self, best_poly=0, show=True):
"""
Visualize and analyse the resistance and support position sequences: cluster with k from 1 up to the number of points, compare the variance of the clusterers,
find the best number of clusters via a variance threshold and similar methods, and finally obtain the best kmeans clusterer object
:param best_poly: passed to show_support_resistance_pos,
the caller may set best_poly; when set, ABuRegUtil.search_best_poly is not used to search for it
:param show: whether to visualize
:return: support_est, resistance_est, support_pos, resistance_pos
"""
# visualize and analyse the resistance and support positions of the technical line
support_pos, resistance_pos = self.show_support_resistance_pos(best_poly, show=show)
support_pos = np.array([support_pos, [self.tl[support] for support in support_pos]]).T
resistance_pos = np.array([resistance_pos, [self.tl[resistance] for resistance in resistance_pos]]).T
support_est = None
if len(support_pos) > 1:
# compare the variance of multiple clusterers, find the best number of clusters via a variance threshold and similar methods, and finally obtain the best kmeans clusterer object
# note that show is forced to False here
support_est = select_k_support_resistance(support_pos, label='support k choice', show=False)
resistance_est = None
if len(resistance_pos) > 1:
# note that show is forced to False here
resistance_est = select_k_support_resistance(resistance_pos, label='resistance k choice', show=False)
return support_est, resistance_est, support_pos, resistance_pos
def show_support_resistance_trend(self, best_poly=0, only_last=False, plot_org=False, show=True, show_step=False):
"""
Chained calls: show_support_resistance_select_k -> support_resistance_predict
-> ABuTLExecute.plot_support_resistance_trend
Finally draws the resistance and support levels of the technical line; note that the show parameter of the intermediate steps is separate from
the final visualization of the resistance and support levels
:param best_poly: passed to show_support_resistance_pos,
the caller may set best_poly; when set, ABuRegUtil.search_best_poly is not used to search for it
:param only_last: passed through to ABuTLExecute.plot_support_resistance_trend, only draw the last resistance or support found in the time series
:param plot_org: passed through to ABuTLExecute.plot_support_resistance_trend, controls whether to draw a segment or a full line;
plot_org=True draws the original segment, otherwise a line via LinearRegression
:param show_step: show_step controls the visualization of the intermediate steps, not the final visualization of resistance or support
:param show: show controls the final visualization of resistance or support
"""
support_est, resistance_est, support_pos, resistance_pos = self.show_support_resistance_select_k(best_poly,
show=show_step)
if show:
plt.figure(figsize=ABuEnv.g_plt_figsize)
y_trend_dict = {}
if support_est is not None:
# FIXME handle the extreme case where not enough points are found to draw support/resistance levels
support_trend = support_resistance_predict(self.x, self.tl, support_est, support_pos,
is_support=True, show=show_step)
y_support_trend = plot_support_resistance_trend(self.x, self.tl, support_trend, 'support trend line',
only_last=only_last, plot_org=plot_org, show=show)
if y_support_trend is not None:
y_trend_dict['support'] = y_support_trend
else:
log_func('can\'t plot support !')
if resistance_est is not None:
resistance_trend = support_resistance_predict(self.x, self.tl, resistance_est,
resistance_pos, is_support=False, show=show_step)
y_resistance_trend = plot_support_resistance_trend(self.x, self.tl, resistance_trend,
'resistance trend line',
only_last=only_last, plot_org=plot_org, show=show)
if y_resistance_trend is not None:
y_trend_dict['resistance'] = y_resistance_trend
else:
log_func('can\'t plot resistance !')
if show:
plt.legend(loc=2)
plt.show()
return y_trend_dict
def show_support_trend(self, best_poly=0, only_last=False, plot_org=False, show=True, show_step=False):
"""
Finally draws only the support levels of the technical line
Chained calls: show_support_resistance_select_k -> support_resistance_predict
-> ABuTLExecute.plot_support_resistance_trend
:param best_poly: passed to show_support_resistance_pos,
the caller may set best_poly; when set, ABuRegUtil.search_best_poly is not used to search for it
:param only_last: passed through to ABuTLExecute.plot_support_resistance_trend, only draw the last resistance or support found in the time series
:param plot_org: passed through to ABuTLExecute.plot_support_resistance_trend, controls whether to draw a segment or a full line;
plot_org=True draws the original segment, otherwise a line via LinearRegression
:param show_step: show_step controls the visualization of the intermediate steps, not the final visualization of resistance or support
:param show: show controls the final visualization of resistance or support
"""
if show:
plt.figure(figsize=ABuEnv.g_plt_figsize)
support_est, _, support_pos, _ = self.show_support_resistance_select_k(best_poly,
show=show_step)
y_trend_dict = {}
if support_est is not None:
support_trend = support_resistance_predict(self.x, self.tl, support_est, support_pos,
is_support=True, show=show_step)
y_support_trend = plot_support_resistance_trend(self.x, self.tl, support_trend, 'support trend line',
only_last=only_last, plot_org=plot_org, show=show)
if y_support_trend is not None:
y_trend_dict['support'] = y_support_trend
if show:
plt.legend(loc=2)
plt.show()
return y_trend_dict
def show_resistance_trend(self, best_poly=0, only_last=False, plot_org=False, show=True, show_step=False):
"""
Finally draws only the resistance levels of the technical line
Chained calls: show_support_resistance_select_k -> support_resistance_predict
-> ABuTLExecute.plot_support_resistance_trend
:param best_poly: passed to show_support_resistance_pos,
the caller may set best_poly; when set, ABuRegUtil.search_best_poly is not used to search for it
:param only_last: passed through to ABuTLExecute.plot_support_resistance_trend, only draw the last resistance or support found in the time series
:param plot_org: passed through to ABuTLExecute.plot_support_resistance_trend, controls whether to draw a segment or a full line;
plot_org=True draws the original segment, otherwise a line via LinearRegression
:param show_step: show_step controls the visualization of the intermediate steps, not the final visualization of resistance or support
:param show: show controls the final visualization of resistance or support
"""
_, resistance_est, _, resistance_pos = self.show_support_resistance_select_k(best_poly,
show=show_step)
if show:
plt.figure(figsize=ABuEnv.g_plt_figsize)
y_trend_dict = {}
if resistance_est is not None:
resistance_trend = support_resistance_predict(self.x, self.tl, resistance_est,
resistance_pos, is_support=False, show=show_step)
y_resistance_trend = plot_support_resistance_trend(self.x, self.tl, resistance_trend,
'resistance trend line',
only_last=only_last, plot_org=plot_org, show=show)
if y_resistance_trend is not None:
y_trend_dict['resistance'] = y_resistance_trend
if show:
plt.legend(loc=2)
plt.show()
return y_trend_dict
def show_percents(self, percents=(0.1, 0.9)):
"""
Visualize the percentage-split regions of the technical line
:param percents: a float or an iterable sequence, default (0.1, 0.9)
:return:
"""
if not isinstance(percents, Iterable):
# if it is not an iterable, put it into a list so it can be handled uniformly
percents = [percents]
pts_dict = find_percent_point(percents, self.tl)
with plt_show():
plt.plot(self.tl)
"""
eg: pts_dict looks like:
{0.1: (15.732749999999999, 15.5075), 0.9: (31.995000000000005, 34.387500000000003)}
i.e. what is returned is a percentage band; draw the upper and lower boundaries of the band
"""
for pt, color in zip(pts_dict, itertools.cycle(K_PLT_MAP_STYLE)):
stats_key = 'stats:{}'.format(pt)
sight_key = 'sight:{}'.format(pt)
p_dict = {stats_key: pts_dict[pt][0], sight_key: pts_dict[pt][1]}
plt.axhline(p_dict[stats_key], c=color, label=stats_key)
plt.axhline(p_dict[sight_key], c='y', label=sight_key)
below, above = below_above_gen(*pts_dict[pt])
plt.fill_between(self.x, below, above,
alpha=0.5, color=color)
plt.legend(loc='best')
def show_golden(self, both_golden=True):
"""
Visualize the golden-section levels of the technical line
:param both_golden: visualize both golden-section calculations on the same figure
:return:
"""
if both_golden:
# visualizing both golden-section calculations on one figure simply wraps show_percents
self.show_percents(percents=(0.382, 0.618))
else:
# visualize them separately
find_golden_point_ex(self.x, self.tl, show=True)
find_golden_point(self.x, self.tl, show=True)
def __str__(self):
"""打印对象显示:line_name: close, below, above, mean"""
return "{}: now:{} below:{} above:{}".format(self.line_name,
self.close,
self.low,
self.high, self.mean)
__repr__ = __str__
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/table_demo.py | 6 | 1719 | """
Demo of table function to display a table within a plot.
"""
import numpy as np
import matplotlib.pyplot as plt
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[ 139361, 331509, 343164, 781380, 52269]]
columns = ('Freeze', 'Wind', 'Flood', 'Quake', 'Hail')
rows = ['%d year' % x for x in (100, 50, 20, 10, 5)]
values = np.arange(0, 2500, 500)
value_increment = 1000
# Get some pastel shades for the colors
colors = plt.cm.BuPu(np.linspace(0, 0.5, len(columns)))
n_rows = len(data)
index = np.arange(len(columns)) + 0.3
bar_width = 0.4
# Initialize the vertical-offset for the stacked bar chart.
y_offset = np.array([0.0] * len(columns))
# Plot bars and create text labels for the table
cell_text = []
for row in range(n_rows):
plt.bar(index, data[row], bar_width, bottom=y_offset, color=colors[row])
y_offset = y_offset + data[row]
cell_text.append(['%1.1f' % (x/1000.0) for x in y_offset])
# Reverse colors and text labels to display the last value at the top.
colors = colors[::-1]
cell_text.reverse()
# Add a table at the bottom of the axes
the_table = plt.table(cellText=cell_text,
rowLabels=rows,
rowColours=colors,
colLabels=columns,
loc='bottom')
# Adjust layout to make room for the table:
plt.subplots_adjust(left=0.2, bottom=0.2)
plt.ylabel("Loss in ${0}'s".format(value_increment))
plt.yticks(values * value_increment, ['%d' % val for val in values])
plt.xticks([])
plt.title('Loss by Disaster')
plt.show()
| mit |
abhishekgahlot/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
466152112/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
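# K and T are minimal estimators with two constructor parameters each; they are used
# below to exercise nested parameter handling (e.g. get_params(deep=True) and
# set_params with 'a__d') and the repr of composed estimators.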
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
ywcui1990/htmresearch | projects/sequence_prediction/discrete_sequences/plotMultiplePredictionWithErrBar.py | 12 | 6799 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot multiple prediction experiment result with error bars
"""
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from plot import movingAverage
from plot import computeAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def loadExperiment(experiment):
print "Loading experiment ", experiment
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'])
accuracy = movingAverage(accuracy, min(len(accuracy), 100))
return (accuracy, x)
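# Accuracy traces from repeated runs may differ in length, so truncate all of them to
# the shortest length before computing the mean and standard deviation across runs.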
def calculateMeanStd(accuracyAll):
numRepeats = len(accuracyAll)
numLength = min([len(a) for a in accuracyAll])
accuracyMat = np.zeros(shape=(numRepeats, numLength))
for i in range(numRepeats):
accuracyMat[i, :] = accuracyAll[i][:numLength]
meanAccuracy = np.mean(accuracyMat, axis=0)
stdAccuracy = np.std(accuracyMat, axis=0)
return (meanAccuracy, stdAccuracy)
def plotWithErrBar(x, y, error, color):
plt.fill_between(x, y-error, y+error,
alpha=0.3, edgecolor=color, facecolor=color)
  plt.plot(x, y, color=color, linewidth=4)
  plt.ylabel('Prediction Accuracy')
  plt.xlabel('Number of elements seen')
if __name__ == '__main__':
try:
# Load raw experiment results
# You have to run the experiments
# In ./tm/
# python tm_suite.py --experiment="high-order-distributed-random-perturbed" -d
# In ./lstm/
# python suite.py --experiment="high-order-distributed-random-perturbed" -d
expResults = {}
tmResults = os.path.join("tm/results",
"high-order-distributed-random-multiple-predictions")
lstmResults = os.path.join("lstm/results",
"high-order-distributed-random-multiple-predictions")
elmResults = os.path.join("elm/results",
"high-order-distributed-random-multiple-predictions")
for numPrediction in [2, 4]:
accuracyTM = []
accuracyLSTM = []
accuracyELM = []
for seed in range(10):
experiment = os.path.join(tmResults,
"num_predictions{:.1f}seed{:.1f}".format(numPrediction, seed),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyTM.append(np.array(accuracy))
experiment = os.path.join(lstmResults,
"seed{:.1f}num_predictions{:.1f}".format(seed, numPrediction),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyLSTM.append(np.array(accuracy))
experiment = os.path.join(elmResults,
"seed{:.1f}num_predictions{:.1f}".format(seed, numPrediction),
"0.log")
(accuracy, x) = loadExperiment(experiment)
accuracyELM.append(np.array(accuracy))
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyTM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['HTMNumPrediction{:.0f}'.format(numPrediction)] = expResult
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyLSTM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['LSTMNumPrediction{:.0f}'.format(numPrediction)] = expResult
(meanAccuracy, stdAccuracy) = calculateMeanStd(accuracyELM)
expResult = {'x': x[:len(meanAccuracy)], 'meanAccuracy': meanAccuracy, 'stdAccuracy': stdAccuracy}
expResults['ELMNumPrediction{:.0f}'.format(numPrediction)] = expResult
output = open('./result/MultiPredictionExperiment.pkl', 'wb')
pickle.dump(expResults, output, -1)
output.close()
except:
print "Cannot find raw experiment results"
print "Plot using saved processed experiment results"
input = open('./result/MultiPredictionExperiment.pkl', 'rb')
expResults = pickle.load(input)
colorList = {"HTMNumPrediction2": "r",
"LSTMNumPrediction2": "g",
"ELMNumPrediction2": "b",
"HTMNumPrediction4": "r",
"LSTMNumPrediction4": "g",
"ELMNumPrediction4": "b"}
modelList = ['HTMNumPrediction2',
'LSTMNumPrediction2',
'ELMNumPrediction2',
'HTMNumPrediction4',
'LSTMNumPrediction4',
'ELMNumPrediction4']
plt.figure(1)
for model in ['HTMNumPrediction2',
'LSTMNumPrediction2',
'ELMNumPrediction2']:
expResult = expResults[model]
plotWithErrBar(expResult['x'],
expResult['meanAccuracy'], expResult['stdAccuracy'],
colorList[model])
plt.legend(['HTM', 'LSTM', 'ELM'], loc=4)
plt.figure(2)
for model in ['HTMNumPrediction4',
'LSTMNumPrediction4',
'ELMNumPrediction4']:
expResult = expResults[model]
plotWithErrBar(expResult['x'],
expResult['meanAccuracy'], expResult['stdAccuracy'],
colorList[model])
plt.legend(['HTM', 'LSTM', 'ELM'], loc=4)
for fig in [1, 2]:
plt.figure(fig)
retrainLSTMAt = np.arange(start=1000, stop=12000, step=1000)
for line in retrainLSTMAt:
plt.axvline(line, color='orange')
plt.ylim([-0.05, 1.05])
# plt.xlim([0, 11000])
plt.figure(1)
plt.savefig('./result/model_performance_2_prediction_errbar.pdf')
plt.figure(2)
plt.savefig('./result/model_performance_4_prediction_errbar.pdf') | agpl-3.0 |
philipwangdk/HPC | HPC_bitbucket/uwhpsc/2013/solutions/homework2/hw2b.py | 2 | 8666 |
"""
Demonstration module for quadratic interpolation.
Sample solutions for Homework 2 problems #2 through #7.
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import solve
def quad_interp(xi,yi):
"""
Quadratic interpolation. Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2.
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2.
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have length 3"
assert len(xi)==3 and len(yi)==3, error_message
# Set up linear system to interpolate through data points:
A = np.vstack([np.ones(3), xi, xi**2]).T
c = solve(A,yi)
return c
def plot_quad(xi, yi):
"""
Perform quadratic interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = quad_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
y = c[0] + c[1]*x + c[2]*x**2
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(-2,8) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('quadratic.png') # save figure as .png file
def cubic_interp(xi,yi):
"""
Cubic interpolation. Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2,3
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3.
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have length 4"
assert len(xi)==4 and len(yi)==4, error_message
# Set up linear system to interpolate through data points:
A = np.vstack([np.ones(4), xi, xi**2, xi**3]).T
c = solve(A,yi)
return c
def plot_cubic(xi, yi):
"""
Perform cubic interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = cubic_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
y = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(-2,8) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('cubic.png') # save figure as .png file
def poly_interp(xi,yi):
"""
General polynomial interpolation.
Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2,...,n-1
where n = len(xi) = len(yi).
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2 + ... + c[N-1]*x**(N-1).
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have the same length "
assert len(xi)==len(yi), error_message
# Set up linear system to interpolate through data points:
# Uses a list comprehension, see
# http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions
n = len(xi)
A = np.vstack([xi**j for j in range(n)]).T
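    # A is the n-by-n Vandermonde matrix with A[i, j] = xi[i]**j, so solving the
    # linear system A*c = yi below yields the coefficients of the interpolating polynomial.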
c = solve(A,yi)
return c
def plot_poly(xi, yi):
"""
Perform polynomial interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = poly_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
# Use Horner's rule:
n = len(xi)
y = c[n-1]
for j in range(n-1, 0, -1):
y = y*x + c[j-1]
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(yi.min()-1, yi.max()+1) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('poly.png') # save figure as .png file
def test_quad1():
"""
Test code, no return value or exception if test runs properly.
"""
xi = np.array([-1., 0., 2.])
yi = np.array([ 1., -1., 7.])
c = quad_interp(xi,yi)
c_true = np.array([-1., 0., 2.])
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_quad(xi,yi)
def test_quad2():
"""
Test code, no return value or exception if test runs properly.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., 2., -3.])
# Points to interpolate:
xi = np.array([-1., 0., 2.])
# Function values to interpolate:
yi = c_true[0] + c_true[1]*xi + c_true[2]*xi**2
# Now interpolate and check we get c_true back again.
c = quad_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_quad(xi,yi)
def test_cubic1():
"""
Test code, no return value or exception if test runs properly.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., -2., -3., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2.])
# Function values to interpolate:
yi = c_true[0] + c_true[1]*xi + c_true[2]*xi**2 + c_true[3]*xi**3
# Now interpolate and check we get c_true back again.
c = cubic_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_cubic(xi,yi)
def test_poly1():
"""
Test code, no return value or exception if test runs properly.
Same points as test_cubic1.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., -2., -3., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2.])
# Function values to interpolate:
# Use Horner's rule:
n = len(xi)
yi = c_true[n-1]
for j in range(n-1, 0, -1):
yi = yi*xi + c_true[j-1]
# Now interpolate and check we get c_true back again.
c = poly_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_poly(xi,yi)
def test_poly2():
"""
Test code, no return value or exception if test runs properly.
Test with 5 points (quartic interpolating function).
"""
# Generate a test by specifying c_true first:
c_true = np.array([0., -6., 11., -6., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2., 4.])
# Function values to interpolate:
# Use Horner's rule:
n = len(xi)
yi = c_true[n-1]
for j in range(n-1, 0, -1):
yi = yi*xi + c_true[j-1]
# Now interpolate and check we get c_true back again.
c = poly_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_poly(xi,yi)
if __name__=="__main__":
print "Running test..."
test_quad1()
test_quad2()
test_cubic1()
test_poly1()
test_poly2()
| mit |
DonBeo/statsmodels | statsmodels/graphics/tukeyplot.py | 33 | 2473 | from statsmodels.compat.python import range
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.lines as lines
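# Draw a Tukey HSD style plot: one horizontal simultaneous confidence interval per
# pairwise comparison of group means, with a dashed reference line at zero difference.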
def tukeyplot(results, dim=None, yticklabels=None):
npairs = len(results)
fig = plt.figure()
fsp = fig.add_subplot(111)
fsp.axis([-50,50,0.5,10.5])
fsp.set_title('95 % family-wise confidence level')
fsp.title.set_y(1.025)
fsp.set_yticks(np.arange(1,11))
fsp.set_yticklabels(['V-T','V-S','T-S','V-P','T-P','S-P','V-M',
'T-M','S-M','P-M'])
#fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
fsp.yaxis.grid(True, linestyle='-', color='gray')
fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
fsp.xaxis.tick_bottom()
fsp.yaxis.tick_left()
xticklines = fsp.get_xticklines()
for xtickline in xticklines:
xtickline.set_marker(lines.TICKDOWN)
xtickline.set_markersize(10)
xlabels = fsp.get_xticklabels()
for xlabel in xlabels:
xlabel.set_y(-.04)
yticklines = fsp.get_yticklines()
for ytickline in yticklines:
ytickline.set_marker(lines.TICKLEFT)
ytickline.set_markersize(10)
ylabels = fsp.get_yticklabels()
for ylabel in ylabels:
ylabel.set_x(-.04)
for pair in range(npairs):
data = .5+results[pair]/100.
#fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data[1], linewidth=1.25,
fsp.axhline(y=npairs-pair, xmin=data.mean(), xmax=data[1], linewidth=1.25,
color='blue', marker="|", markevery=1)
fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data.mean(), linewidth=1.25,
color='blue', marker="|", markevery=1)
#for pair in range(npairs):
# data = .5+results[pair]/100.
# data = results[pair]
# data = np.r_[data[0],data.mean(),data[1]]
# l = plt.plot(data, [npairs-pair]*len(data), color='black',
# linewidth=.5, marker="|", markevery=1)
fsp.axvline(x=0, linestyle="--", color='black')
fig.subplots_adjust(bottom=.125)
results = np.array([[-10.04391794, 26.34391794],
[-21.45225794, 14.93557794],
[ 5.61441206, 42.00224794],
[-13.40225794, 22.98557794],
[-29.60225794, 6.78557794],
[ -2.53558794, 33.85224794],
[-21.55225794, 14.83557794],
[ 8.87275206, 45.26058794],
[-10.14391794, 26.24391794],
[-37.21058794, -0.82275206]])
#plt.show()
| bsd-3-clause |
zivy/SimpleITK-Notebooks | Python/characterize_data.py | 2 | 20622 | import SimpleITK as sitk
import pandas as pd
import numpy as np
import os
import sys
import shutil
import subprocess
import platform
# We use the multiprocess package instead of the official
# multiprocessing as it currently has several issues as discussed
# on the software carpentry page: https://hpc-carpentry.github.io/hpc-python/06-parallel/
import multiprocess as mp
from functools import partial
import argparse
import hashlib
import tempfile
#Maximal number of parallel processes we run.
MAX_PROCESSES = 15
'''
This script inspects/characterizes images in a given directory structure. It
recursively traverses the directories and either inspects the files one by one
or, if in DICOM series inspection mode, inspects the data on a per-series basis
(all 2D series files combined into a single 3D image).
To run the script one needs to specify:
1. Root of the data directory.
2. Output file name.
3. The analysis type to perform per_file or per_series. The latter indicates
we are only interested in DICOM files. When run using per_file empty lines
in the results file are due to:
a. The file is not an image or is a corrupt image file.
b. SimpleITK was unable to read the image file (contact us with an example).
4. Optional SimpleITK imageIO to use. The default value is
the empty string, indicating that all file types should be read.
To see the set of ImageIO types supported by your version of SimpleITK,
call ImageFileReader::GetRegisteredImageIOs() or simply print an
ImageFileReader object.
5. Optional external applications to run. Their return value (zero or
   non-zero) is used to log success or failure. A nice example is the
dciodvfy program from David Clunie (https://www.dclunie.com/dicom3tools.html)
which validates compliance with the DICOM standard.
6. When the external applications are provided, corresponding column headings
are also required. These are used in the output csv file.
7. Optional metadata keys. These are image specific keys such as DICOM tags
or other metadata tags that may be found in the image. The content of the
tags is written to the result file.
8. When the metadata tags are provided, corresponding column headings
are also required. These are used in the output csv file.
Examples:
Run a generic file analysis:
python characterize_data.py ../Data/ Output/generic_image_data_report.csv per_file \
--imageIO "" --external_applications ./dciodvfy --external_applications_headings "DICOM Compliant" \
--metadata_keys "0008|0060" "0018|5101" --metadata_keys_headings "modality" "radiographic view"
Run a DICOM series based analysis:
python characterize_data.py ../Data/ Output/DICOM_image_data_report.csv per_series \
--metadata_keys "0008|0060" "0018|5101" --metadata_keys_headings "modality" "radiographic view"
'''
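# Illustrative note (not part of the original script): besides the command line, the
# inspection functions below can be called directly from Python. A minimal sketch,
# assuming this file is importable as characterize_data and that ../Data/ exists:
#
#     import characterize_data
#     df = characterize_data.inspect_files('../Data/',
#                                          meta_data_keys=['0008|0060'],
#                                          additional_column_names=['modality'])
#     df.to_csv('Output/generic_image_data_report.csv', index=False)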
def inspect_image(sitk_image, image_info, current_index, meta_data_keys=[]):
'''
    Inspect a SimpleITK image, filling the relevant entries of image_info with the parameters characterizing the image.
Parameters
----------
sitk_image (SimpleITK.Image): Input image for inspection.
image_info (list): Image information is written to this list, starting at current_index.
[,,,MD5 intensity hash,
image size, image spacing, image origin, axis direction,
pixel type, min intensity, max intensity,
meta data_1...meta_data_n,,,]
current_index (int): Starting index into the image_info list.
meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
inspect.
Returns
-------
index to the next empty entry in the image_info list.
The image_info list is filled with the following values:
MD5 intensity hash - Enable identification of duplicate images in terms of intensity.
This is different from SimpleITK image equality where the
same intensities with different image spacing/origin/direction cosine
are considered different images as they occupy a different spatial
region.
image size - number of pixels in each dimension.
pixel type - type of pixels (scalar - gray, vector - gray or color).
min/max intensity - if a scalar image, min and max values.
    meta data_i - value of the image's metadata dictionary for the given key (e.g. the modality tag '0008|0060').
'''
image_info[current_index] = hashlib.md5(sitk.GetArrayViewFromImage(sitk_image)).hexdigest()
current_index = current_index+1
image_info[current_index] = sitk_image.GetSize()
current_index = current_index + 1
image_info[current_index] = sitk_image.GetSpacing()
current_index = current_index + 1
image_info[current_index] = sitk_image.GetOrigin()
current_index = current_index + 1
image_info[current_index] = sitk_image.GetDirection()
current_index = current_index + 1
if sitk_image.GetNumberOfComponentsPerPixel() == 1: #greyscale image, get the min/max pixel values
image_info[current_index] = sitk_image.GetPixelIDTypeAsString() + ' gray'
current_index = current_index+1
mmfilter = sitk.MinimumMaximumImageFilter()
mmfilter.Execute(sitk_image)
image_info[current_index] = mmfilter.GetMinimum()
current_index = current_index+1
image_info[current_index] = mmfilter.GetMaximum()
current_index = current_index+1
else: # either a color image or a greyscale image masquerading as a color one
pixel_type = sitk_image.GetPixelIDTypeAsString()
channels = [sitk.GetArrayFromImage(sitk.VectorIndexSelectionCast(sitk_image,i)) for i in range(sitk_image.GetNumberOfComponentsPerPixel())]
if np.array_equal(channels[0], channels[1]) and np.array_equal(channels[0], channels[2]):
pixel_type = pixel_type + f' {sitk_image.GetNumberOfComponentsPerPixel()} channels gray'
else:
pixel_type = pixel_type + f' {sitk_image.GetNumberOfComponentsPerPixel()} channels color'
image_info[current_index] = pixel_type
current_index = current_index+3
img_keys = sitk_image.GetMetaDataKeys()
for k in meta_data_keys:
if k in img_keys:
image_info[current_index] = sitk_image.GetMetaData(k)
current_index = current_index+1
return current_index
def inspect_single_file(file_name, imageIO='', meta_data_keys=[], external_programs=[]):
'''
Inspect a file using the specified imageIO, returning a list with the relevant information.
Parameters
----------
file_name (str): Image file name.
imageIO (str): Name of image IO to use. To see the list of registered image IOs use the
ImageFileReader::GetRegisteredImageIOs() or print an ImageFileReader.
The empty string indicates to read all file formats supported by SimpleITK.
meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
inspect.
external_programs(list(str)): A list of programs we will run with the file_name as input
the return value 'succeeded' or 'failed' is recorded. This is useful
for example if you need to validate conformance to a standard
such as DICOM.
Returns
-------
list with the following entries: [file name, MD5 intensity hash,
image size, image spacing, image origin, axis direction,
pixel type, min intensity, max intensity,
meta data_1...meta_data_n,
external_program_res_1...external_program_res_m]
If the given file is not readable by SimpleITK, the only meaningful entry in the list
will be the file name (all other values will be either None or NaN).
'''
file_info = [None]*(9+len(meta_data_keys)+len(external_programs))
file_info[0] = file_name
current_index = 1
try:
reader = sitk.ImageFileReader()
reader.SetImageIO(imageIO)
reader.SetFileName(file_name)
img = reader.Execute()
current_index = inspect_image(img, file_info, current_index, meta_data_keys)
for p in external_programs:
try:
# run the external programs, check the return value, and capture all output so it
# doesn't appear on screen. The CalledProcessError exception is raised if the
# external program fails (returns non zero value).
subprocess.run([p, file_name], check=True, capture_output=True)
file_info[current_index] = 'succeeded'
except:
file_info[current_index] = 'failed'
current_index = current_index+1
except:
pass
return file_info
def inspect_files(root_dir, imageIO='', meta_data_keys=[], external_programs=[], additional_column_names=[]):
'''
Iterate over a directory structure and return a pandas dataframe with the relevant information for the
    image files. This also includes non-image files. The resulting dataframe will only include the file name
    if that file wasn't successfully read by SimpleITK. The two reasons for failure are: (1) the user-specified
    imageIO isn't compatible with the file format (e.g. the user is only interested in reading jpg and the file
format is mha) or (2) the file could not be read by the SimpleITK IO (corrupt file or unexpected limitation of
SimpleITK).
Parameters
----------
root_dir (str): Path to the root of the data directory. Traverse the directory structure
and inspect every file (also report non image files, in which
case the only valid entry will be the file name).
imageIO (str): Name of image IO to use. To see the list of registered image IOs use the
ImageFileReader::GetRegisteredImageIOs() or print an ImageFileReader.
The empty string indicates to read all file formats supported by SimpleITK.
meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
inspect.
external_programs(list(str)): A list of programs we will run with the file_name as input
the return value 'succeeded' or 'failed' is recorded. This
is useful for example if you need to validate conformance
to a standard such as DICOM.
    additional_column_names (list(str)): Column names corresponding to the contents of the
meta_data_keys and external_programs lists.
Returns
-------
pandas DataFrame: Each row in the data frame corresponds to a single file.
'''
if len(meta_data_keys) + len(external_programs) != len(additional_column_names):
raise ValueError('Number of additional column names does not match expected.')
column_names = ['file name', 'MD5 intensity hash',
'image size', 'image spacing', 'image origin', 'axis direction',
'pixel type', 'min intensity', 'max intensity'] + additional_column_names
all_file_names = []
for dir_name, subdir_names, file_names in os.walk(root_dir):
all_file_names += [os.path.join(os.path.abspath(dir_name), fname) for fname in file_names]
# Get list of lists describing the results and then combine into a dataframe, faster
# than appending to the dataframe one by one. Use parallel processing to speed things up.
if platform.system() == 'Windows':
res = map(partial(inspect_single_file,
imageIO=imageIO,
meta_data_keys=meta_data_keys,
external_programs=external_programs), all_file_names)
else:
with mp.Pool(processes=MAX_PROCESSES) as pool:
res = pool.map(partial(inspect_single_file,
imageIO=imageIO,
meta_data_keys=meta_data_keys,
external_programs=external_programs), all_file_names)
return pd.DataFrame(res, columns=column_names)
def inspect_single_series(series_data, meta_data_keys=[]):
'''
    Inspect a single DICOM series (DICOM hierarchy of patient-study-series-image).
This can be a single file, or multiple files such as a CT or
MR volume.
Parameters
----------
series_data (two entry tuple): First entry is study:series, second entry is the list of
files comprising this series.
meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
inspect.
Returns
-------
list with the following entries: [study:series, MD5 intensity hash,
image size, image spacing, image origin, axis direction,
pixel type, min intensity, max intensity,
meta data_1...meta_data_n]
'''
series_info = [None]*(9+len(meta_data_keys))
series_info[0] = series_data[1]
current_index = 1
try:
reader = sitk.ImageSeriesReader()
reader.MetaDataDictionaryArrayUpdateOn()
reader.LoadPrivateTagsOn()
_,sid = series_data[0].split(':')
file_names = series_data[1]
# As the files comprising a series with multiple files can reside in
# separate directories and SimpleITK expects them to be in a single directory
# we use a tempdir and symbolic links to enable SimpleITK to read the series as
# a single image. Additionally the files are renamed as they may have resided in
        # separate directories with the same file name. Finally, unfortunately, on Windows
# we copy the files to the tempdir as the os.symlink documentation says that
# "On newer versions of Windows 10, unprivileged accounts can create symlinks
# if Developer Mode is enabled. When Developer Mode is not available/enabled,
# the SeCreateSymbolicLinkPrivilege privilege is required, or the process must be
# run as an administrator."
with tempfile.TemporaryDirectory() as tmpdirname:
if platform.system() == 'Windows':
for i, fname in enumerate(file_names):
shutil.copy(os.path.abspath(fname),
os.path.join(tmpdirname,str(i)))
else:
for i, fname in enumerate(file_names):
os.symlink(os.path.abspath(fname),
os.path.join(tmpdirname,str(i)))
reader.SetFileNames(sitk.ImageSeriesReader_GetGDCMSeriesFileNames(tmpdirname, sid))
img = reader.Execute()
for k in meta_data_keys:
if reader.HasMetaDataKey(0,k):
img.SetMetaData(k,reader.GetMetaData(0,k))
inspect_image(img, series_info, current_index, meta_data_keys)
except:
pass
return series_info
def inspect_series(root_dir, meta_data_keys=[], additional_column_names=[]):
'''
Inspect all series found in the directory structure. A series does not have to
be in a single directory (the files are located in the subtree and combined
into a single image).
Parameters
----------
root_dir (str): Path to the root of the data directory. Traverse the directory structure
and inspect every series. If the series is comprised of multiple image files
they do not have to be in the same directory. The only expectation is that all
images from the series are under the root_dir.
meta_data_keys(list(str)): The series meta-data dictionary keys whose value we want to
inspect.
    additional_column_names (list(str)): Column names corresponding to the contents of the
meta_data_keys list.
Returns
-------
pandas DataFrame: Each row in the data frame corresponds to a single file.
'''
if len(meta_data_keys) != len(additional_column_names):
raise ValueError('Number of additional column names does not match expected.')
column_names = ['files', 'MD5 intensity hash',
'image size', 'image spacing', 'image origin', 'axis direction',
'pixel type', 'min intensity', 'max intensity'] + additional_column_names
all_series_files = {}
reader = sitk.ImageFileReader()
#collect the file names of all series into a dictionary with the key being
#study:series. This traversal is faster, O(n), than calling GetGDCMSeriesIDs on each
#directory followed by iterating over the series and calling
#GetGDCMSeriesFileNames with the seriesID on that directory, O(n^2).
for dir_name, subdir_names, file_names in os.walk(root_dir):
for file in file_names:
try:
fname = os.path.join(dir_name, file)
reader.SetFileName(fname)
reader.ReadImageInformation()
sid = reader.GetMetaData('0020|000e')
study = reader.GetMetaData('0020|000d')
key = f'{study}:{sid}'
if key in all_series_files:
all_series_files[key].append(fname)
else:
all_series_files[key] = [fname]
except Exception:
pass
# Get list of lists describing the results and then combine into a dataframe, faster
# than appending to the dataframe one by one.
res = [inspect_single_series(series_data, meta_data_keys) for series_data in all_series_files.items()]
return pd.DataFrame(res, columns=column_names)
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('root_of_data_directory', help='path to the topmost directory containing data')
parser.add_argument('output_file', help='output csv file path')
parser.add_argument('analysis_type', default = 'per_file', help='type of analysis, "per_file" or "per_series"')
parser.add_argument('--imageIO', default = '', help='SimpleITK imageIO to use for reading (e.g. BMPImageIO)')
parser.add_argument('--external_applications', default = [], nargs='*', help='paths to external applications')
parser.add_argument('--external_applications_headings', default = [], nargs='*', help='titles of the results columns for external applications')
parser.add_argument('--metadata_keys', nargs='*', default = [], help='inspect values of these metadata keys (DICOM tags or other keys stored in the file)')
parser.add_argument('--metadata_keys_headings', default = [], nargs='*', help='titles of the results columns for the metadata_keys')
args = parser.parse_args(argv)
if len(args.external_applications)!= len(args.external_applications_headings):
print('Number of external applications and their headings do not match.')
sys.exit(1)
if len(args.metadata_keys)!= len(args.metadata_keys_headings):
print('Number of metadata keys and their headings do not match.')
sys.exit(1)
if args.analysis_type not in ['per_file', 'per_series']:
print('Unexpected analysis type.')
sys.exit(1)
if args.analysis_type == 'per_file':
df = inspect_files(args.root_of_data_directory,
imageIO=args.imageIO,
meta_data_keys = args.metadata_keys,
external_programs=args.external_applications,
additional_column_names= args.metadata_keys_headings + args.external_applications_headings)
df.to_csv(args.output_file, index=False)
sys.exit(0)
if args.analysis_type == 'per_series':
df = inspect_series(args.root_of_data_directory,
meta_data_keys = args.metadata_keys,
additional_column_names=args.metadata_keys_headings)
df.to_csv(args.output_file, index=False)
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
Tong-Chen/scikit-learn | sklearn/feature_selection/__init__.py | 7 | 1056 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/cluster/_bicluster.py | 2 | 20639 | """Spectral biclustering algorithms."""
# Authors : Kemal Eren
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
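    # Alternate the independent row/column scaling of _scale_normalize until two
    # successive iterates differ by less than ``tol`` (a Sinkhorn-Knopp style
    # iteration) or ``max_iter`` is reached.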
X = make_nonnegative(X)
X_scaled = X
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
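    # Remove row and column effects in log space:
    # K[i, j] = L[i, j] - rowmean(L)[i] - colmean(L)[j] + mean(L).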
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
y : Ignored
"""
X = self._validate_data(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
def _more_tags(self):
return {
"_xfail_checks": {
"check_estimators_dtypes": "raises nan error",
"check_fit2d_1sample": "_scale_normalize fails",
"check_fit2d_1feature": "raises apply_along_axis error",
"check_estimator_sparse_data": "does not fail gracefully",
"check_methods_subset_invariance": "empty array passed inside",
"check_dont_overwrite_parameters": "empty array passed inside",
"check_fit2d_predict1d": "emptry array passed inside",
}
}
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : int, default=3
The number of biclusters to find.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
    init : {'k-means++', 'random'} or ndarray of shape \
            (n_clusters, n_features), default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like of shape (n_cols,)
The bicluster label of each column.
biclusters_ : tuple of two ndarrays
The tuple contains the `rows_` and `columns_` arrays.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.cluster import SpectralCoclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_ #doctest: +SKIP
array([0, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_ #doctest: +SKIP
array([0, 0], dtype=int32)
>>> clustering
SpectralCoclustering(n_clusters=2, random_state=0)
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, *, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
random_state)
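    # Dhillon's algorithm: scale-normalize the data, take the leading
    # 1 + ceil(log2(n_clusters)) singular vector pairs (discarding the first),
    # then stack the rescaled row and column embeddings and cluster them jointly
    # with a single k-means.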
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c
for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c
for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
The number of row and column clusters in the checkerboard
structure.
method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'.
.. warning::
           if `method='log'`, the data must not be sparse.
n_components : int, default=6
Number of singular vectors to check.
n_best : int, default=3
Number of best singular vectors to which to project the data
for clustering.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
:func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'} or ndarray of (n_clusters, n_features), \
default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
Row partition labels.
column_labels_ : array-like of shape (n_cols,)
Column partition labels.
biclusters_ : tuple of two ndarrays
The tuple contains the `rows_` and `columns_` arrays.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.cluster import SpectralBiclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_
array([0, 1], dtype=int32)
>>> clustering
SpectralBiclustering(n_clusters=2, random_state=0)
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, *, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super()._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError) as e:
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)") from e
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
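# Illustrative sketch (not part of scikit-learn): the piecewise-constant
# approximation that ``_fit_best_piecewise`` applies to each singular vector,
# shown on a single 1-d array. ``KMeans`` is used here only to mirror the
# internal ``_k_means`` helper; the variable names are hypothetical.
#
#     import numpy as np
#     from sklearn.cluster import KMeans
#
#     v = np.array([0.1, 0.2, 0.15, 5.0, 5.1, 4.9])
#     km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(v.reshape(-1, 1))
#     piecewise = km.cluster_centers_[km.labels_].ravel()
#     error = np.linalg.norm(v - piecewise)
#     # the vectors with the smallest ``error`` are the ones that are kept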
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/ensemble/gradient_boosting.py | 18 | 71095 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
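# For reference, the per-sample loss computed in ``__call__`` below, with
# ``gamma`` the alpha-quantile of |y - pred| (a sketch of the formula, not
# additional behaviour):
#     l(y, p) = 0.5 * (y - p) ** 2               if |y - p| <= gamma
#             = gamma * (|y - p| - gamma / 2)    otherwise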
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
The node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
using the fact that y - prob = residual.
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
# weight the full per-sample deviance, not only its first term
return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
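# Illustrative sketch (not executed here): ``BaseGradientBoosting._check_params``
# resolves a loss by name from the registry above, e.g. for quantile regression:
#
#     loss_class = LOSS_FUNCTIONS['quantile']
#     loss = loss_class(n_classes=1, alpha=0.8)   # -> QuantileLossFunction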
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# print verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
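A minimal early-stopping monitor could look like this (an illustrative
sketch, not part of the library; any truthy return value stops fitting)::

    def stop_when_oob_degrades(i, est, locals_):
        # requires subsample < 1.0 so that oob_improvement_ is tracked
        return est.subsample < 1.0 and est.oob_improvement_[i] < 0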
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger than or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort is True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf that x ends up in for each estimator.
In the case of binary classification, n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
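Examples
--------
A minimal usage sketch (illustrative only; the dataset and parameter
choices below are arbitrary):

>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> X, y = make_classification(n_samples=100, random_state=0)
>>> clf = GradientBoostingClassifier(n_estimators=50, random_state=0)
>>> clf = clf.fit(X, y)
>>> y_pred = clf.predict(X[:2])
>>> y_proba = clf.predict_proba(X[:2])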
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
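Examples
--------
A minimal usage sketch (illustrative only; the dataset and parameter
choices below are arbitrary):

>>> from sklearn.datasets import make_regression
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_regression(n_samples=100, random_state=0)
>>> est = GradientBoostingRegressor(n_estimators=50, random_state=0)
>>> est = est.fit(X, y)
>>> y_pred = est.predict(X[:2])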
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf that x ends up in for each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
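# Illustrative sketch (not part of scikit-learn): a minimal usage example of
# GradientBoostingRegressor on synthetic data. The toy data and parameter
# values below are assumptions chosen only for demonstration.
def _demo_gradient_boosting_regressor():
    import numpy as np
    from sklearn.ensemble import GradientBoostingRegressor
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(200, 3))
    y = X[:, 0] + 2.0 * X[:, 1] ** 2 + 0.1 * rng.normal(size=200)
    est = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1,
                                    max_depth=2, random_state=0)
    est.fit(X[:150], y[:150])
    # predict() returns predictions of the final model; staged_predict()
    # yields one prediction array per boosting stage, useful for monitoring.
    final = est.predict(X[150:])
    staged = list(est.staged_predict(X[150:]))
    assert len(staged) == est.n_estimators
    # apply() maps each sample to the leaf index it reaches in every tree,
    # giving an array of shape (n_samples, n_estimators).
    leaves = est.apply(X[150:])
    return final, leaves.shape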
| bsd-3-clause |
cauchycui/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
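# Illustrative sketch (not part of the library): exercising the two helpers
# above on a synthetic eigenvalue spectrum. The spectrum values are
# assumptions chosen so that three directions dominate the variance.
def _demo_infer_dimension():
    spectrum = np.array([10.0, 5.0, 2.0, 0.1, 0.1, 0.1])
    n_samples, n_features = 1000, spectrum.shape[0]
    # _assess_dimension_ returns the log-likelihood of one candidate rank;
    # _infer_dimension_ simply picks the rank with the highest likelihood.
    ll_rank3 = _assess_dimension_(spectrum, 3, n_samples, n_features)
    best_rank = _infer_dimension_(spectrum, n_samples, n_features)
    return ll_rank3, best_rank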
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
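# Illustrative sketch (not part of the library): using the probabilistic-PCA
# facilities of the class above (get_covariance, score_samples). The toy
# data below is an assumption for demonstration only.
def _demo_pca_scoring():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 5))
    X[:, 2:] *= 0.1  # the last three directions carry little variance
    pca = PCA(n_components=2).fit(X)
    cov = pca.get_covariance()      # (5, 5) model covariance
    ll = pca.score_samples(X)       # per-sample log-likelihood
    return cov.shape, ll.mean(), pca.noise_variance_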
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
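# Illustrative sketch (not part of the library): round-tripping data through
# RandomizedPCA. With whiten=False and n_components equal to n_features the
# reconstruction is exact up to numerical error; the toy matrix is an
# assumption for demonstration only.
def _demo_randomized_pca_roundtrip():
    X = np.array([[-1., -1.], [-2., -1.], [-3., -2.],
                  [1., 1.], [2., 1.], [3., 2.]])
    rpca = RandomizedPCA(n_components=2, random_state=0)
    X_reduced = rpca.fit_transform(X)
    X_back = rpca.inverse_transform(X_reduced)
    return np.allclose(X, X_back)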
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
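# Illustrative sketch (not part of the test suite): a minimal end-to-end use
# of PLS regression on the same Linnerud data the tests above rely on. The
# component count is an assumption for demonstration only.
def _demo_pls_regression():
    d = load_linnerud()
    X, Y = d.data, d.target
    pls = pls_.PLSRegression(n_components=2)
    pls.fit(X, Y)
    Y_pred = pls.predict(X)      # shape (n_samples, n_targets)
    X_scores = pls.transform(X)  # shape (n_samples, n_components)
    return Y_pred.shape, X_scores.shape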
| bsd-3-clause |
3manuek/deeppy | examples/convnet_cifar.py | 4 | 3013 | #!/usr/bin/env python
"""
Convnets for image classification (2)
=====================================
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import deeppy as dp
# Fetch CIFAR10 data
dataset = dp.dataset.CIFAR10()
x_train, y_train, x_test, y_test = dataset.data(dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.SupervisedInput(x_test, y_test, batch_size=batch_size)
# Setup network
def conv_layer(n_filters):
return dp.Convolution(
n_filters=32,
filter_shape=(5, 5),
border_mode='full',
weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.003),
)
def pool_layer():
return dp.Pool(
win_shape=(3, 3),
strides=(2, 2),
border_mode='same',
method='max',
)
net = dp.NeuralNetwork(
layers=[
conv_layer(32),
dp.Activation('relu'),
pool_layer(),
conv_layer(32),
dp.Activation('relu'),
pool_layer(),
conv_layer(64),
dp.Activation('relu'),
pool_layer(),
dp.Flatten(),
dp.DropoutFullyConnected(
n_out=64,
weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.03)
),
dp.Activation('relu'),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller(gain=1.25)),
)
],
loss=dp.SoftmaxCrossEntropy(),
)
# Train network
def val_error():
return net.error(test_input)
n_epochs = [8, 8]
learn_rate = 0.05
for i, max_epochs in enumerate(n_epochs):
lr = learn_rate/10**i
trainer = dp.StochasticGradientDescent(
max_epochs=max_epochs,
learn_rule=dp.Momentum(learn_rate=lr, momentum=0.9),
)
trainer.train(net, train_input, val_error)
# Evaluate on test data
error = net.error(test_input)
print('Test error rate: %.4f' % error)
# Plot image examples.
def plot_img(img, title):
plt.figure()
plt.imshow(img, interpolation='nearest')
plt.title(title)
plt.axis('off')
plt.tight_layout()
img_bhwc = np.transpose(x_train[:70], (0, 2, 3, 1))
img_tile = dp.misc.img_tile(dp.misc.img_stretch(img_bhwc), aspect_ratio=0.75,
border_color=1.0)
plot_img(img_tile, title='CIFAR10 example images')
# Plot convolutional filters.
filters = [l.weights.array for l in net.layers
if isinstance(l, dp.Convolution)]
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2, 2, height_ratios=[1, 3])
subplot_idxs = [0, 2, 3]
for i, f in enumerate(filters):
ax = plt.subplot(gs[subplot_idxs[i]])
ax.imshow(dp.misc.conv_filter_tile(f), cmap='gray',
interpolation='nearest')
ax.set_title('Conv layer %i' % i)
ax.axis('off')
plt.tight_layout()
| mit |
bigartm/visartm | research/models.py | 1 | 8234 | from django.db import models
from datetime import datetime
import traceback
import os
from django.conf import settings
from datasets.models import Dataset
from models.models import ArtmModel
from assessment.models import AssessmentProblem
from django.contrib.auth.models import User
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from shutil import rmtree
from django.contrib import admin
class Research(models.Model):
dataset = models.ForeignKey(Dataset, null=True, blank=True)
model = models.ForeignKey(ArtmModel, null=True, blank=True)
problem = models.ForeignKey(AssessmentProblem, null=True, blank=True)
researcher = models.ForeignKey(User, null=False)
script_name = models.TextField(null=False)
start_time = models.DateTimeField(null=False, default=datetime.now)
finish_time = models.DateTimeField(null=True, blank=True)
sealed = models.BooleanField(default=False)
# 1-running,2-OK,3-errror,4-interrupted, 5-backup
status = models.IntegerField(null=False, default=0)
error_message = models.TextField(null=True, blank=True)
is_private = models.BooleanField(default=False)
def run(self):
with open(self.get_report_file(), "w", encoding="utf-8") as f:
f.write("<html>\n<head><meta charset='utf-8'></head>\n<body>")
f.write("<h1>Research report</h1>\n")
f.write("<p>Research id: %d<br>\n" % self.id)
f.write("Dataset: %s<br>\n" % str(self.dataset))
if self.model:
f.write("Model: %s (id=%d)<br>\n" %
(str(self.model), self.model.id))
if self.problem:
f.write("Assesment problem: %s<br>\n" % str(self.problem))
f.write("Script: %s<br>\n" % self.script_name)
f.write("Researcher: %s<br>\n" % self.researcher.username)
f.write("Research started: %s</p>\n" %
self.start_time.strftime("%d.%m.%y %H:%M:%S"))
f.write("<hr>\n")
script_file_name = os.path.join(
settings.BASE_DIR, "algo", "research", self.script_name)
self.img_counter = 0
try:
with open(script_file_name, "r", encoding="utf-8") as f:
code = compile(f.read(), script_file_name, "exec")
exec(code, {"research": self})
except BaseException:
self.status = 3
self.error_message = traceback.format_exc()
self.finish_time = datetime.now()
self.save()
return
self.finish_time = datetime.now()
self.status = 2
self.save()
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write("<hr>\n")
f.write("<p>Research finished: %s</p>\n" %
self.finish_time.strftime("%d.%m.%y %H:%M:%S"))
f.write("</body>\n</html>\n")
def report_html(self, text):
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write(text + "\n")
def report(self, text):
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write(text + "<br>\n")
def log(self, text):
self.report("[LOG] %s" % text)
def report_p(self, text=""):
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write("<p>" + text + "</p>\n")
def gca(self, figsize=None):
self.figure = self.get_figure(figsize=figsize)
return self.figure.gca()
def get_figure(self, figsize=None):
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
self.figure = plt.figure(figsize=figsize)
return self.figure
def show_matrix(self, m):
self.gca().imshow(m, interpolation="nearest")
self.report_picture()
def report_picture(
self,
height=400,
width=400,
align='left',
bbox_extra_artists=None,
name=None):
self.img_counter += 1
file_name = str(self.img_counter) + '.png'
eps_file_name = str(self.img_counter) + '.eps'
if name:
eps_file_name = name + ".eps"
self.figure.savefig(
os.path.join(
self.get_pic_folder(),
eps_file_name),
bbox_extra_artists=bbox_extra_artists,
bbox_inches='tight')
self.figure.savefig(
os.path.join(
self.get_pic_folder(),
file_name),
bbox_extra_artists=bbox_extra_artists,
bbox_inches='tight')
self.figure.clf()
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write(
("<div align='%s'><a href='pic/%s'>"
"<img src='pic/%s' width='%d' heigth='%d' />"
"</a></div>\n") %
(align, eps_file_name, file_name, width, height))
del self.figure
def latex_table(self, table, format):
nrows = len(table)
ncols = len(table[0])
ans = "\\begin{tabular}{|%s|}\n" % "|".join(
["c" for i in range(ncols)])
for row in table:
ans += "\\hline\n"
for i in range(ncols):
ans += (format % row[i])
if i == ncols - 1:
ans += " \\\\\n"
else:
ans += " & "
ans += "\\hline\n"
ans += "\\end{tabular}\n"
return ans
def report_table(self, table, format="%s"):
with open(self.get_report_file(), "a", encoding="utf-8") as f:
f.write('<table border="1" cellpadding="0" cellspacing="0">\n')
for row in table:
f.write("<tr>\n")
for cell in row:
if format:
f.write("<td>")
f.write(format % cell)
f.write("</td>")
f.write("</tr>\n")
f.write("</table>\n")
self.img_counter += 1
f.write(
"<p><a href='pic/%d.txt'>Table in LaTeX</a></p>" %
self.img_counter)
table_file = os.path.join(self.get_pic_folder(),
str(self.img_counter) + '.txt')
with open(table_file, "w", encoding='utf-8') as f:
f.write(self.latex_table(table, format))
def get_folder(self):
path = os.path.join(settings.DATA_DIR, "research", str(self.id))
if not os.path.exists(path):
os.makedirs(path)
return path
def get_pic_folder(self):
path = os.path.join(settings.DATA_DIR, "research", str(self.id), "pic")
if not os.path.exists(path):
os.makedirs(path)
return path
def get_report_file(self):
return os.path.join(self.get_folder(), "report.html")
def __str__(self):
return "Research %d (%s, %s)" % (
self.id, str(self.dataset), self.script_name)
def duration(self):
if self.finish_time:
dt = self.finish_time - self.start_time
else:
dt = datetime.now() - self.start_time
seconds = dt.seconds
return "{:02}:{:02}".format(seconds // 60, seconds % 60)
def on_start():
# print ("ENTRY POINT 2")
for research in Research.objects.filter(status=1):
research.status = 4
research.save()
@receiver(pre_delete, sender=Research, dispatch_uid='research_delete_signal')
def remove_research_files(sender, instance, using, **kwargs):
if instance.sealed:
backup = Research()
backup.researcher = instance.researcher
backup.status = 5
backup.sealed = True
backup.start_time = instance.start_time
backup.finish_time = instance.finish_time
backup.script_name = instance.script_name
backup.save()
os.rename(
instance.get_folder(), os.path.join(
settings.DATA_DIR, "research", str(
backup.id)))
else:
try:
rmtree(instance.get_folder())
except BaseException:
pass
admin.site.register(Research)
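# Illustrative sketch (not part of the app): Research.latex_table only formats
# its arguments and never touches the database, so it can be exercised on an
# unsaved instance. The sample table below is an assumption for demonstration.
def _demo_latex_table():
    research = Research()
    table = [["metric", "value"],
             ["perplexity", 1234.5],
             ["coherence", 0.42]]
    # Produces a \begin{tabular}{|c|c|} ... \end{tabular} block with one
    # \hline-separated row per list entry.
    return research.latex_table(table, format="%s")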
| bsd-3-clause |
lariszakrista/EMP | src/classification/image_data.py | 2 | 6825 | import json
import os
from random import shuffle
import cv2
import numpy as np
from sklearn.model_selection import StratifiedKFold
import constants as const
import utils
def _open_single_image(path, squash, dim):
img = cv2.imread(path, cv2.IMREAD_COLOR)
if img is None:
raise cv2.error('Failed to open {}'.format(os.path.basename(path)))
if not squash:
sq_dim = min(img.shape[0], img.shape[1])
yshift = int((img.shape[0] - sq_dim) / 2)
xshift = int((img.shape[1] - sq_dim) / 2)
yadd = img.shape[0] - (2 * sq_dim)
xadd = img.shape[1] - (2 * sq_dim)
img = img[yshift:(img.shape[0] - yshift - yadd), xshift:(img.shape[1] - xshift - xadd)]
return cv2.resize(img, dim)
def _open_images(img_paths, squash, dim):
images = list()
for path in img_paths:
images.append(_open_single_image(path, squash, dim))
return images
class ImageDataBase(object):
def __init__(self, one_hot=True, labels_file=None, labeled_data_file=None):
self.one_hot = one_hot
self.labels = None
self.data = None
# Labels and their corresponding indices
if labels_file is not None:
with open(labels_file) as f:
self.labels = json.load(f)
# Load data and record number of training examples
if labeled_data_file is not None:
with open(labeled_data_file) as f:
self.data = json.load(f)
def get_feature_name(self, idx):
if self.labels is None:
raise ValueError('Dataset features are unnamed')
for k, v in self.labels.items():
if v == idx:
return k
raise KeyError
def get_dataset(self, set_keys):
xs = []
ys = []
for k in set_keys:
try:
x, y = self._get_xy(k)
xs.append(x)
ys.append(y)
except Exception as e:
print('Error obtaining x or y value. Skipping. Error: {}'.format(e))
pass
return np.array(xs), np.array(ys)
@staticmethod
def get_class_name(y):
try:
return utils.decode_totality_prediction(y)
except IndexError:
return const.TOTALITY if y else const.NON_TOTALITY
def get_dim(self):
return self._get_in_dim(), self._get_out_dim()
def _get_in_dim(self):
if self.labels is None:
raise ValueError('Input dimension not set')
return len(self.labels)
def _get_out_dim(self):
if self.one_hot:
return 2
else:
return 1
def _get_xy(self, key):
return self._get_x(key), self._get_y(key)
def _get_x(self, key):
"""
Assumes self.labels is set; otherwise an exception will be raised. If using
a subclass that does not use self.labels, this method should be overridden.
"""
feature_vec = np.zeros(len(self.labels))
for l in self.data[key]['labels']:
feature_idx = self.labels[l[0]]
feature_vec[feature_idx] = l[1]
return feature_vec
def _get_y(self, key):
totality = (self.data[key]['classification'] == const.TOTALITY)
if self.one_hot:
y = [int(v) for v in (totality, not totality)]
else:
y = int(totality)
return y
class ImageDataSimpleSplit(ImageDataBase):
def __init__(self, train_ratio=0.7, *args, **kwargs):
super().__init__(*args, **kwargs)
num_train = int(len(self.data) * train_ratio)
# Shuffle keys so that data is dispersed among train/test sets
keys = list(self.data.keys())
shuffle(keys)
# Choose training/test examples
self.train_keys = keys[:num_train]
self.test_keys = keys[num_train:]
def get_train(self):
return self.get_dataset(self.train_keys)
def get_test(self):
return self.get_dataset(self.test_keys)
class ImageDataKFold(ImageDataBase):
def __init__(self, nfolds, custom_data=None, **kwargs):
super().__init__(**kwargs)
if custom_data is not None:
self.xs, self.ys = custom_data
else:
# Convert entire dataset into feature vectors and
# y labels
self.xs, self.ys = self.get_dataset(self.data.keys())
skf = StratifiedKFold(nfolds, shuffle=True)
# split likes Y parameter (param 2) to have shape
# (n_samples, )
self.folds = skf.split(self.xs, [item[0] for item in self.ys])
def get_all(self):
return self.xs, self.ys
def get_folds(self):
"""
Generator returning k folds
"""
while True:
# StratifiedKFold.split returns a generator
# Get the next set of indices from the generator
train_idx, test_idx = next(self.folds)
# Get feature vectors and y labels
res = list()
for idx in (train_idx, test_idx):
xs = [self.xs[i] for i in idx]
ys = [self.ys[i] for i in idx]
res.append((np.array(xs), np.array(ys)))
yield res
def _get_in_dim(self):
return self.xs[0].shape[0]
class ImageDataNP(ImageDataSimpleSplit):
IMG_DIM = (224, 224, 3)
def __init__(self, img_dir, squash=False, **kwargs):
super().__init__(**kwargs)
self.img_dir = img_dir
self.squash = squash
def _get_in_dim(self):
return self.IMG_DIM
def _get_x(self, key):
fpath = os.path.join(self.img_dir, os.path.basename(key))
return self._open_img(fpath)
def _open_img(self, fpath):
print('Opening {}'.format(os.path.basename(fpath)))
return _open_single_image(fpath, self.squash, self.IMG_DIM[:2])
class PredictionWriter(object):
def __init__(self):
self.predictions = dict()
def add(self, key, pred, y):
v = {
'labels': pred,
'classification': ImageDataBase.get_class_name(y)
}
self.predictions[key] = v
def commit(self, fpath):
with open(fpath, 'w') as f:
f.write(json.dumps(self.predictions))
class ImageSet(object):
def __init__(self, img_paths, squash=True, dim = (224, 224)):
self.img_paths = img_paths
self.squash = squash
self.dim = dim
def get_batches(self, batch_size=32):
start = 0
end = min(batch_size, len(self.img_paths))
while start < len(self.img_paths):
images = _open_images(self.img_paths[start:end], self.squash, self.dim)
yield images, start, end
start = end
end = min(end + batch_size, len(self.img_paths))
# Generator ends when the loop exits; an explicit StopIteration here would
# become a RuntimeError under PEP 479 (Python 3.7+), so it is omitted.
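# Illustrative sketch (not part of the module): iterating over image batches
# with ImageSet. The file paths are placeholders (assumptions); any readable
# image files would work.
def _demo_image_batches():
    paths = ['img_000.jpg', 'img_001.jpg', 'img_002.jpg']  # hypothetical files
    image_set = ImageSet(paths, squash=True, dim=(224, 224))
    shapes = []
    for images, start, end in image_set.get_batches(batch_size=2):
        # Each batch is a list of (224, 224, 3) BGR arrays covering
        # paths[start:end].
        shapes.append((len(images), start, end))
    return shapes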
| apache-2.0 |
jor-/scipy | scipy/integrate/_bvp.py | 4 | 41187 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
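# Illustrative sketch (not part of SciPy): checking estimate_fun_jac on a
# linear ODE rhs f(x, y) = A y, whose exact Jacobian df/dy equals A at every
# mesh node. The matrix A and the mesh below are assumptions for demonstration.
def _demo_estimate_fun_jac():
    A = np.array([[0.0, 1.0], [-1.0, 0.0]])
    def fun(x, y, p):
        return A.dot(y)
    x = np.linspace(0, 1, 5)
    y = np.ones((2, x.size))
    p = np.array([])
    df_dy, df_dp = estimate_fun_jac(fun, x, y, p)
    # df_dy has shape (n, n, m); every slice along the mesh should equal A.
    assert df_dy.shape == (2, 2, x.size) and df_dp is None
    return np.allclose(df_dy[:, :, 0], A, atol=1e-6)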
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
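# Illustrative sketch (not part of SciPy): stacked_matmul is equivalent to
# multiplying each pair of stacked square matrices independently. The random
# shapes below are assumptions for demonstration.
def _demo_stacked_matmul():
    rng = np.random.RandomState(0)
    a = rng.normal(size=(4, 3, 3))
    b = rng.normal(size=(4, 3, 3))
    out = stacked_matmul(a, b)
    expected = np.array([np.dot(a[i], b[i]) for i in range(a.shape[0])])
    return np.allclose(out, expected)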
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0  5
1 1 2 2 0 0 0 0  5
0 0 1 1 2 2 0 0  5
0 0 1 1 2 2 0 0  5
0 0 0 0 1 1 2 2  5
0 0 0 0 1 1 2 2  5

3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
This method belongs to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
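# Illustrative sketch (not part of SciPy): evaluating collocation residuals
# for the trivial ODE y' = 0 with a constant guess, for which the residuals
# must vanish. The mesh and the guess are assumptions for demonstration.
def _demo_collocation_fun():
    def fun(x, y, p):
        return np.zeros_like(y)
    x = np.linspace(0, 1, 6)
    h = np.diff(x)
    y = np.ones((1, x.size))
    p = np.array([])
    col_res, y_middle, f, f_middle = collocation_fun(fun, y, p, x, h)
    # A constant function solves y' = 0 exactly, so all residuals are zero.
    return np.allclose(col_res, 0.0) and np.allclose(y_middle, 1.0)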
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations: when a full step is accepted, the same Jacobian is reused on the
next iteration (the approach of BVP_SOLVER, see the code below).
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for a problems with the
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
bc_tol : float
Tolerance to which we want to satisfy the boundary conditions.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
tolerance, it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluation and factorization, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(np.abs(bc_res) < bc_tol)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Max BC residual", "Total nodes",
"Nodes added"))
def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
nodes_added):
print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, bc_residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system.",
3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as the square root of
the normalized integrals of the squared relative residuals over each
interval. Integrals are estimated using 5-point Lobatto quadrature [1]_,
making use of the fact that residuals at the mesh nodes are identically
zero.
In [2]_ the integrals are not normalized by interval lengths, which gives
a convergence rate of the residuals higher by a factor of h**0.5. The
normalization is done here for ease of interpretation of the return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
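# The coefficients below are the standard cubic Hermite form: on each
# interval the cubic c0*u**3 + c1*u**2 + c2*u + c3 (with u = x - x[i])
# matches y and yp at both endpoints, so the resulting spline is C1.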
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Node removal logic is not implemented, as its impact on the solver is
presumably negligible. So only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Intervals in each of which to insert 1 new node in the middle.
insert_2 : ndarray
Intervals in each of which to insert 2 new nodes, dividing the interval
into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because the np.insert implementation apparently varies across numpy
# versions, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(ya, yb, p):
return np.asarray(bc(ya, yb, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is an n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined, there must be n + k boundary conditions, i.e. bc must be an
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict boundary conditions. See [2]_ for an explanation of how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
bc_tol : float, optional
Desired absolute tolerance for the boundary condition residuals: `bc`
value should satisfy ``abs(bc) < bc_tol`` component-wise.
Equal to `tol` by default. Up to 10 iterations are allowed to achieve this
tolerance.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4th-order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition differs from the one used here
by a factor of h**0.5 (h is an interval length).
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take advantage of having the
solution in spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and guess for y. We aim to find the solution for
k = 2 * pi; to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
if bc_tol is None:
bc_tol = tol
# Maximum number of iterations
max_iteration = 10
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol, bc_tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
max_bc_res = np.max(abs(bc_res))
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, max_bc_res,
m, nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
elif max_bc_res <= bc_tol:
status = 0
break
elif iteration >= max_iteration:
status = 3
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, x.shape[0], max_rms_res, max_bc_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 3:
print("The solver was unable to satisfy boundary conditions "
"tolerance on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
tttor/csipb-jamu-prj | predictor/connectivity/cluster/cluster.py | 1 | 4295 | # cluster.py
import os
import sys
import time
import json
import numpy as np
import sklearn.metrics as met
from collections import defaultdict
from scoop import futures as fu
from scoop import shared as sh
sys.path.append('../../utility')
import util
import yamanishi_data_util as yam
# np.random.seed(0)
DATASET_DIR = '../../dataset/connectivity/compound_vs_protein'
def main():
if len(sys.argv)!=6:
print 'USAGE:'
print 'python -m scoop cluster.py [method] [nIter] [dataset#x] [compound/protein] [outDir]'
return
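# Example invocation (illustrative values, not from the original project):
# python -m scoop cluster.py dbscan 100 yamanishi#nr compound out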
method = sys.argv[1]
nIter = int(sys.argv[2])
dataset = sys.argv[3]
mode = sys.argv[4]
outDir = sys.argv[5]
outDir = os.path.join(outDir,
'-'.join([method+'#'+str(nIter),dataset,mode,util.tag()]))
os.makedirs(outDir)
##
print 'loading data...'
dParam = dataset.split('#')
disMat = None; iList = None
if dParam[0]=='yamanishi':
dataDir = os.path.join(DATASET_DIR,dParam[0])
simDict = yam.loadKernel2(mode,dParam[1],os.path.join(dataDir,'similarity-mat'))
simMat,iList = util.makeKernelMatrix(simDict)
disMat = util.kernel2distanceMatrix('naive',simMat)
else:
assert False,'FATAL: unknown dataset'
##
print 'clustering...'
paramList = []
if method=='dbscan':
epsMin,epsMax = [0.0,1.0]
nMin,nMax = [1,len(iList)]
for i in range(nIter):
eps = np.random.uniform(epsMin,epsMax,1)[0]
n = np.random.randint(nMin,nMax,1)[0]
paramList.append( dict(eps=eps,min_samples=n) )
else:
assert False
sh.setConst(method=method)
sh.setConst(mat=disMat)
resList = list( fu.map(_cluster,paramList) )
bestResIdxCal = _getBestResultIdx(resList,'calinski_harabaz_score')
bestResIdxSil = _getBestResultIdx(resList,'silhouette_score')
resDictCal = dict( zip(iList,resList[bestResIdxCal][0]) )
resDictSil = dict( zip(iList,resList[bestResIdxSil][0]) )
bestParamCal = dict(param=paramList[bestResIdxCal],
score=resList[bestResIdxCal][1])
bestParamSil = dict(param=paramList[bestResIdxSil],
score=resList[bestResIdxSil][1])
##
print 'writing result...'
def _writeLabelAndParam(metric,resDict,paramDict):
resDict2 = defaultdict(list); resDict3 = defaultdict(list)
for k,v in resDict.iteritems(): resDict2[v].append(k)
for k,v in resDict2.iteritems(): resDict3[k].append(len(v))
summ = sum([v[0] for v in resDict3.values()])
for k,v in resDict3.iteritems(): resDict3[k].append(float(v[0])/summ)
fname = '_'.join([mode,metric])
with open(os.path.join(outDir,fname+"_bestlabels.json"),'w') as f:
json.dump(resDict,f,indent=2,sort_keys=True)
with open(os.path.join(outDir,fname+"_bestlabels_stat.json"),'w') as f:
json.dump(resDict3,f,indent=2,sort_keys=True)
with open(os.path.join(outDir,fname+"_bestparams.json"),'w') as f:
json.dump(paramDict,f,indent=2,sort_keys=True)
_writeLabelAndParam('calinskiharabaz',resDictCal,bestParamCal)
_writeLabelAndParam('silhouette',resDictSil,bestParamSil)
def _cluster(params):
cls = None
method = sh.getConst('method')
if method=='kmedoid':
assert False
# from kmedoid import kmedsoid
# cls = kmedoid
elif method=='dbscan':
from sklearn.cluster import DBSCAN
cls = DBSCAN(eps=params['eps'],min_samples=params['min_samples'],
metric='precomputed')
else:
assert False, 'FATAL: unknown cluster method'
##
mat = sh.getConst('mat')
labels = cls.fit_predict(mat)
nLabels = len(set(labels))
##
sil = None; cal = None
if (nLabels >= 2)and(nLabels <= len(labels)-1):
sil = met.silhouette_score(mat,labels,'precomputed')
cal = met.calinski_harabaz_score(mat,labels)
perf = dict(silhouette_score=sil,calinski_harabaz_score=cal)
return (labels,perf)
def _getBestResultIdx(resList,metric):
mets = [i[1][metric] for i in resList]
return mets.index(max(mets))
if __name__ == '__main__':
tic = time.time()
main()
print "main took: "+str(time.time()-tic)+' seconds'
| mit |
mendax-grip/cfdemUtilities | phillips/compareMonitorDrag.py | 2 | 2625 | # This program compares two log files of two different OpenFOAM cases being run (or that have finished, etc.)
# The comparison that is carried out here is on the drag force
# Author : Bruno Blais
# Last modified : 23-01-2014
#Python imports
#---------------------------------------
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
import re # Ouhh regular expressions :)
#-----------------------------------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#refresh frequency
#=============================
# READER OF LOG FILE
#=============================
# This function reads the log file and extracts the drag force components
def readf(fname):
t=[]
dragx=[]
dragy=[]
dragz=[]
patternVariable = re.compile('Volume CPU dragtota')
patternTime = re.compile('Time =')
patternTimeExclude = re.compile('Execution')
infile = open(fname,'r')
if (infile!=0):
print "Log file opened"
l_prev="init"
for l in infile:
if patternVariable.search(l_prev):
l_str = l.split()
dragx.extend([float(l_str[5])])
dragy.extend([float(l_str[6])])
dragz.extend([float(l_str[7])])
if patternTime.search(l):
if not patternTimeExclude.search(l):
l_str= l.split()
l2_num=float(l_str[2])
t.extend([l2_num])
l_prev=l
else:
print "File %s could not be opened" %fname
infile.close()
return t, dragx, dragy, dragz
#======================
# MAIN
#======================
# Get names from terminal
fname=[]
for i in range(0,len(sys.argv)-1) : fname.extend([sys.argv[i+1]])
#plt.ion() # interactive mode is on
plt.figure()
line1 = plt.plot([], [],'-k') #create structure that will be updated
plt.ylabel('Drag force on the particles [N]')
plt.xlabel('Time [s]')
plt.title('Dynamic evolution of the average drag on the particles')
#### Plot 1
[t,dragx,dragy,dragz] = readf(fname[0])
plt.plot(t,dragx[2:],'-b',label = fname[0])
#### Plot 2
[t,dragx,dragy,dragz] = readf(fname[1])
plt.plot(t,dragx[2:],'-r',label = fname[1])
#### Plot 3
if len(sys.argv) >= 4 :
[t,dragx,dragy,dragz] = readf(fname[2])
plt.plot(t,dragx[2:],'-g',label = fname[2])
#### Plot 4
if len(sys.argv) >= 5 :
[t,dragx,dragy,dragz] = readf(fname[3])
plt.plot(t,dragx[2:],'-y',label = fname[3])
#### Plot 5
if len(sys.argv) >= 6 :
[t,dragx,dragy,dragz] = readf(fname[4])
plt.plot(t,dragx[2:],'-m',label = fname[4])
#### Plot 6
if len(sys.argv) >= 7 :
[t,dragx,dragy,dragz] = readf(fname[5])
plt.plot(t,dragx[2:],'-m',label = fname[5])
plt.legend()
plt.show()
| lgpl-3.0 |
rspavel/spack | var/spack/repos/builtin/packages/py-torch-geometric/package.py | 1 | 1971 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTorchGeometric(PythonPackage):
"""PyTorch Geometric (PyG) is a geometric deep learning extension
library for PyTorch. It consists of various methods for deep
learning on graphs and other irregular structures, also known as
geometric deep learning, from a variety of published papers. In
addition, it consists of an easy-to-use mini-batch loader for many
small and single giant graphs, multi gpu-support, a large number
of common benchmark datasets (based on simple interfaces to create
your own), and helpful transforms, both for learning on arbitrary
graphs as well as on 3D meshes or point clouds."""
homepage = "https://github.com/rusty1s/pytorch_geometric"
url = "https://github.com/rusty1s/pytorch_geometric/archive/1.6.0.tar.gz"
version('1.6.0', sha256='7d5231cdcc2ebd4444f406cbf1537eb49bf90ab6f446eaf1b7af5cdbe105f3c9')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pytest-runner', type='build')
depends_on('py-torch', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-networkx', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('py-numba', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-rdflib', type=('build', 'run'))
depends_on('py-googledrivedownloader', type=('build', 'run'))
depends_on('py-h5py~mpi', type=('build', 'run'))
depends_on('py-ase', type=('build', 'run'))
depends_on('py-jinja2', type=('build', 'run'))
| lgpl-2.1 |
fritzo/loom | loom/preql.py | 1 | 31258 | # Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import copy
import csv
import math
from contextlib import contextmanager
from itertools import izip
from collections import Counter
from distributions.io.stream import json_load
from distributions.io.stream import open_compressed
from StringIO import StringIO
import numpy
from sklearn.cluster import SpectralClustering
from loom.format import load_decoder
from loom.format import load_encoder
import loom.store
import loom.query
import loom.group
import loom.transforms
import loom.util
SAMPLE_COUNT = 1000
class CsvWriter(object):
def __init__(self, outfile, returns=None):
writer = csv.writer(outfile)
self.writerow = writer.writerow
self.writerows = writer.writerows
self.result = returns if returns else lambda: None
@contextmanager
def csv_output(arg):
if arg is None:
outfile = StringIO()
yield CsvWriter(outfile, returns=outfile.getvalue)
elif hasattr(arg, 'write'):
yield CsvWriter(arg)
else:
with open_compressed(arg, 'w') as outfile:
yield CsvWriter(outfile)
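# Usage sketch (illustrative, not part of the original docstrings): calling
# csv_output(None) yields a writer whose result() returns the csv text;
# passing a filename or file handle writes there and result() returns None.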
@contextmanager
def csv_input(arg):
if hasattr(arg, 'read'):
yield csv.reader(arg)
else:
with open_compressed(arg, 'rb') as infile:
yield csv.reader(infile)
class PreQL(object):
'''
PreQL - Predictive Query Language server object.
Data are assumed to be in csv format. Data can be read from and written to
file or can be passed around as StringIO objects.
To convert among csv and pandas dataframes, use the transforms:
input = StringIO(input_df.to_csv()) # input_df is a pandas.DataFrame
output_df = pandas.read_csv(StringIO(output))
Usage in scripts:
with loom.preql.get_server('/absolute/path/to/dataset') as preql:
preql.predict(...)
preql.relate(...)
preql.refine(...)
preql.support(...)
preql.group(...)
Usage in iPython notebooks:
preql = loom.preql.get_server('/absolute/path/to/dataset')
preql.predict(...)
preql.relate(...)
preql.refine(...)
preql.support(...)
preql.group(...)
preql.close()
Methods:
predict(rows_csv, count, result_out, ...)
Draw samples from the posterior conditioned on rows in rows_csv.
relate(columns, result_out, ...)
Quantify dependency among columns and all other features.
refine(target_feature_sets, query_feature_sets, conditioning_row, ...)
Determine which queries would inform target features, in context.
support(target_feature_sets, known_feature_sets, conditioning_row, ...)
Determine which knowledge has informed target features, in context.
group(column, result_out)
Cluster rows according to target column and related columns.
Properties:
feature_names - a list of all feature names
converters - a dict of converters for use in pandas.read_csv
'''
def __init__(self, query_server, encoding=None, debug=False):
self._paths = loom.store.get_paths(query_server.root)
if encoding is None:
encoding = self._paths['ingest']['encoding']
self._query_server = query_server
self._encoders = json_load(encoding)
transforms = self._paths['ingest']['transforms']
self._transform = loom.transforms.load_transforms(transforms)
self._feature_names = [e['name'] for e in self._encoders]
self._feature_set = frozenset(self._feature_names)
self._name_to_pos = {
name: i
for i, name in enumerate(self._feature_names)
}
self._name_to_decode = {
e['name']: load_decoder(e)
for e in self._encoders
}
self._name_to_encode = {
e['name']: load_encoder(e)
for e in self._encoders
}
self._rowid_map = None
self._debug = debug
@property
def feature_names(self):
return self._feature_names[:] # copy in lieu of frozenlist
@property
def converters(self):
convert = lambda string: string if string else None
return {name: convert for name in self._feature_names}
@property
def rowid_map(self):
if self._rowid_map is None:
filename = self._paths['ingest']['rowids']
with loom.util.csv_reader(filename) as reader:
self._rowid_map = {
int(internal_id): external_id
for internal_id, external_id in reader
}
return self._rowid_map
def close(self):
self._query_server.close()
def __enter__(self):
return self
def __exit__(self, *unused):
self.close()
def _cols_to_mask(self, cols):
cols = set(cols)
fnames = enumerate(self._feature_names)
return frozenset(i for i, fname in fnames if fname in cols)
def _validate_feature_set(self, feature_set):
if len(feature_set) == 0:
raise ValueError('empty feature set: '.format(feature_set))
for name in feature_set:
if name not in self._feature_set:
raise ValueError('invalid feature: {}'.format(name))
def _validate_feature_sets(self, feature_sets):
for s in feature_sets:
self._validate_feature_set(s)
sets = set(feature_sets)
if len(sets) != len(feature_sets):
raise ValueError('duplicate sets in feature sets: {}'.format(sets))
sum_len = sum(len(s) for s in feature_sets)
len_sum = len(frozenset.union(*feature_sets))
if sum_len != len_sum:
raise ValueError('feature sets are not disjoint: {}'.format(sets))
def _encode_row(self, row):
if len(row) != len(self._feature_names):
raise ValueError('invalid row (bad length): {}'.format(row))
encoded_row = []
for pos, value in enumerate(row):
if value:
assert isinstance(value, str), value
encode = self._name_to_encode[self._feature_names[pos]]
try:
encoded_row.append(encode(value))
except:
raise ValueError(
'bad value at position {}: {}'.format(pos, value))
else:
encoded_row.append(None)
return encoded_row
def _decode_row(self, row):
if len(row) != len(self._feature_names):
raise ValueError('invalid row (bad length): {}'.format(row))
decoded_row = []
for pos, value in enumerate(row):
if value is not None:
decode = self._name_to_decode[self._feature_names[pos]]
try:
decoded_row.append(decode(value))
except:
raise ValueError(
'bad value at position {}: {}'.format(pos, value))
else:
decoded_row.append(None)
return decoded_row
def encode_set(self, feature_set):
return self._feature_set & self._transform.forward_set(feature_set)
def encode_row(self, row, header=None):
features = self._feature_names
if header is None:
header = features
if row is None:
row = [None] * len(features)
else:
if isinstance(row, dict):
row = self._transform.forward_dict(features, row)
else:
row = self._transform.forward_row(header, features, row)
row = self._encode_row(row)
return row
def decode_row(self, row, header=None):
features = self._feature_names
if header is None:
header = features
row = self._decode_row(row)
row = self._transform.backward_row(features, header, row)
return row
def _normalized_mutual_information(
self,
feature_set1,
feature_set2,
entropys=None,
conditioning_row=None,
sample_count=None):
mi = self._query_server.mutual_information(
feature_set1=feature_set1,
feature_set2=feature_set2,
entropys=entropys,
conditioning_row=conditioning_row,
sample_count=sample_count).mean
return normalize_mutual_information(mi)
def predict(self, rows_csv, count, result_out=None, id_offset=True):
'''
Samples from the conditional joint distribution.
Inputs:
rows_csv - filename/file handle/StringIO of input conditional rows
count - number of samples to generate for each input row
result_out - filename/file handle/StringIO of output samples,
or None to return a csv string
id_offset - whether to ignore column 0 as an unused id column
Outputs:
A csv with filled-in data rows sampled from the
joint conditional posterior distribution.
Example:
Assume 'rows.csv' has already been written.
>>> print open('rows.csv').read()
feature0,feature1,feature2
,,
0,,
1,,
>>> preql.predict('rows.csv', 2, 'result.csv', id_offset=False)
>>> print open('result.csv').read()
feature0,feature1,feature2
0.5,0.1,True
0.5,0.2,True
0,1.5,False
0,1.3,True
1,0.1,False
1,0.2,False
'''
with csv_output(result_out) as writer:
with csv_input(rows_csv) as reader:
self._predict(reader, count, writer, id_offset)
return writer.result()
def _predict(self, reader, count, writer, id_offset):
header = reader.next()
if id_offset and header[0] in self._feature_names:
raise ValueError('id field conflict: {}'.format(header[0]))
writer.writerow(header)
for row in reader:
if id_offset:
row_id = row[0]
conditioning_row = self.encode_row(row, header)
to_sample = [value is None for value in conditioning_row]
samples = self._query_server.sample(
to_sample,
conditioning_row,
count)
for sample in samples:
print sample
sample = self.decode_row(sample, header)
if id_offset:
sample[0] = row_id
writer.writerow(sample)
def relate(self, columns, result_out=None, sample_count=SAMPLE_COUNT):
'''
Compute pairwise related scores between all pairs (f1,f2) of columns
where f1 in input columns and f2 in all_features.
Inputs:
columns - a list of target feature names. a mix of fluent and basic
features is allowed
result_out - filename/file handle/StringIO of output relatedness,
or None to return a csv string
sample_count - number of samples in Monte Carlo computations;
increasing sample_count increases accuracy
Outputs:
A csv with columns corresponding to input columns and one row
per dataset feature. The value in each cell is a relatedness
number in [0,1] with 0 meaning independent and 1 meaning
highly related.
Related scores are defined in terms of mutual information via
loom.preql.normalize_mutual_information. For multivariate Gaussian
data, relatedness equals Pearson's correlation; for non-Gaussian
and discrete data, relatedness captures dependence in a more
general way.
Example:
>>> print preql.relate(['feature0', 'feature2'])
,feature0,feature2
feature0,1.0,0.5
feature1,0.0,0.5
feature2,0.5,1.0
'''
target_feature_sets = [self.encode_set([f]) for f in columns]
query_feature_sets = [self.encode_set([f]) for f in columns]
conditioning_row = self.encode_row(None)
with csv_output(result_out) as writer:
self._relate(
target_feature_sets,
query_feature_sets,
conditioning_row,
writer,
sample_count)
return writer.result()
def refine(
self,
target_feature_sets=None,
query_feature_sets=None,
conditioning_row=None,
result_out=None,
sample_count=SAMPLE_COUNT):
'''
Determine which queries would inform target features, in context.
Specifically, compute a matrix of values relatedness values
[[r(t,q) for q in query_feature_sets] for t in target_feature_sets]
conditioned on conditioning_row.
Inputs:
target_feature_sets - list of disjoint sets of feature names;
defaults to [[f] for f unobserved in conditioning_row]
query_feature_sets - list of disjoint sets of feature names;
defaults to [[f] for f unobserved in conditioning_row]
conditioning_row - a data row or dict of contextual information
result_out - filename/file handle/StringIO of output data,
or None to return a csv string
sample_count - number of samples in Monte Carlo computations;
increasing sample_count increases accuracy
Outputs:
A csv with columns corresponding to query_feature_sets and
rows corresponding to target_feature_sets. The value in each cell
is a relatedness number in [0,1] with 0 meaning independent and 1
meaning highly related. See help(PreQL.relate) for details.
Rows and columns will be labeled by the lexicographically-first
feature in the respective set.
Example:
>>> print preql.refine(
[['f0', 'f1'], ['f2']],
[['f0', 'f1'], ['f2'], ['f3']],
[None, None, None, 1.0])
,f0,f2,f3
f0,1.,0.9,0.5
f2,0.8,1.,0.8
'''
conditioning_row = self.encode_row(conditioning_row)
fc_zip = zip(self._feature_names, conditioning_row)
if target_feature_sets is None:
target_feature_sets = [[f] for f, c in fc_zip if c is None]
if query_feature_sets is None:
query_feature_sets = [[f] for f, c in fc_zip if c is None]
target_feature_sets = map(self.encode_set, target_feature_sets)
query_feature_sets = map(self.encode_set, query_feature_sets)
unobserved_features = frozenset.union(*target_feature_sets) | \
frozenset.union(*query_feature_sets)
mismatches = []
for feature, condition in fc_zip:
if feature in unobserved_features and condition is not None:
mismatches.append(feature)
if mismatches:
raise ValueError(
'features {} must be None in conditioning row {}'.format(
mismatches,
conditioning_row))
self._validate_feature_sets(target_feature_sets)
self._validate_feature_sets(query_feature_sets)
with csv_output(result_out) as writer:
self._relate(
target_feature_sets,
query_feature_sets,
conditioning_row,
writer,
sample_count)
return writer.result()
def support(
self,
target_feature_sets=None,
observed_feature_sets=None,
conditioning_row=None,
result_out=None,
sample_count=SAMPLE_COUNT):
'''
Determine which observed features most inform target features,
in context.
Specifically, compute a matrix of values relatedness values
[[r(t,o | conditioning_row - o) for o in observed_feature_sets]
for t in target_feature_sets]
Where `conditioning_row - o` denotes the `conditioning_row`
with feature `o` set to unobserved.
Note that both features in observed and features in target
must be observed in conditioning row.
Inputs:
target_feature_sets - list of disjoint sets of feature names;
defaults to [[f] for f observed in conditioning_row]
observed_feature_sets - list of disjoint sets of feature names;
defaults to [[f] for f observed in conditioning_row]
conditioning_row - a data row of contextual information
result_out - filename/file handle/StringIO of output data,
or None to return a csv string
sample_count - number of samples in Monte Carlo computations;
increasing sample_count increases accuracy
Outputs:
A csv with columns corresponding to observed_feature_sets and
rows corresponding to target_feature_sets. The value in each cell
is a relatedness number in [0,1] with 0 meaning independent and 1
meaning highly related. See help(PreQL.relate) for details.
Rows and columns will be labeled by the lexicographically-first
feature in the respective set.
Example:
>>> print preql.support(
[['f0', 'f1'], ['f3']],
[['f0', 'f1'], ['f2'], ['f3']],
['a', 7, None, 1.0])
,f0,f2,f3
f0,1.,0.9,0.5
f3,0.8,0.8,1.0
'''
conditioning_row = self.encode_row(conditioning_row)
if all(c is None for c in conditioning_row):
raise ValueError(
'conditioning row must have at least one observation')
fc_zip = zip(self._feature_names, conditioning_row)
if target_feature_sets is None:
target_feature_sets = [[f] for f, c in fc_zip if c is not None]
if observed_feature_sets is None:
observed_feature_sets = [[f] for f, c in fc_zip if c is not None]
target_feature_sets = map(self.encode_set, target_feature_sets)
observed_feature_sets = map(self.encode_set, observed_feature_sets)
self._validate_feature_sets(target_feature_sets)
self._validate_feature_sets(observed_feature_sets)
observed_features = frozenset.union(*target_feature_sets) | \
frozenset.union(*observed_feature_sets)
mismatches = []
for feature, condition in fc_zip:
if feature in observed_features and condition is None:
mismatches.append(feature)
if mismatches:
raise ValueError(
'features {} must not be None in conditioning row {}'.format(
mismatches,
conditioning_row))
with csv_output(result_out) as writer:
self._relate(
target_feature_sets,
observed_feature_sets,
conditioning_row,
writer,
sample_count)
return writer.result()
def _relate(
self,
target_feature_sets,
query_feature_sets,
conditioning_row,
writer,
sample_count):
'''
Compute all pairwise related scores between target_set
and query_set
In general it is assumed that all features in the target set
and query set are unobserved in the conditioning row. If a feature
is not unobserved, all related scores involving that feature will be
computed with respect to a conditioning row with that feature set to
unobserved.
'''
for tfs in target_feature_sets:
for qfs in query_feature_sets:
if tfs != qfs and tfs.intersection(qfs):
raise ValueError('target features and query features'
' must be disjoint or equal:'
' {} {}'.format(tfs, qfs))
target_sets = map(self._cols_to_mask, target_feature_sets)
query_sets = map(self._cols_to_mask, query_feature_sets)
target_labels = map(min, target_feature_sets)
query_labels = map(min, query_feature_sets)
entropys = self._query_server.entropy(
row_sets=target_sets,
col_sets=query_sets,
conditioning_row=conditioning_row,
sample_count=sample_count)
writer.writerow([None] + query_labels)
for target_label, target_set in izip(target_labels, target_sets):
result_row = [target_label]
for query_set in query_sets:
if target_set == query_set:
normalized_mi = 1.0
else:
forgetful_conditioning_row = copy(conditioning_row)
for feature_index in target_set | query_set:
forgetful_conditioning_row[feature_index] = None
if forgetful_conditioning_row != conditioning_row:
normalized_mi = self._normalized_mutual_information(
target_set,
query_set,
entropys=None,
conditioning_row=forgetful_conditioning_row,
sample_count=sample_count)
else:
normalized_mi = self._normalized_mutual_information(
target_set,
query_set,
entropys=entropys,
sample_count=sample_count)
result_row.append(normalized_mi)
writer.writerow(result_row)
def group(self, column, result_out=None):
'''
Compute consensus grouping for a single column.
Inputs:
column - name of a target feature to group by
result_out - filename/file handle/StringIO of output groupings,
or None to return a csv string
Outputs:
A csv file with columns [row_id, group_id, confidence]
with one row per dataset row. Confidence is a real number in [0,1]
meaning how confident a row is to be in a given group. Each row_id
appears exactly once. Csv rows are sorted lexicographically by
group_id, then confidence. Groupids are nonnegative integers.
Larger groups are listed first, so group 0 is the largest.
Example:
>>> print preql.group('feature0')
row_id,group_id,confidence
5,0,1.0
3,0,0.5
9,0,0.1
2,1,0.9
4,1,0.1
0,2,0.4
'''
with csv_output(result_out) as writer:
self._group(column, writer)
return writer.result()
def _group(self, column, writer):
root = self._query_server.root
feature_pos = self._name_to_pos[column]
result = loom.group.group(root, feature_pos)
rowid_map = self.rowid_map
writer.writerow(loom.group.Row._fields)
for row in result:
external_id = rowid_map[row.row_id]
writer.writerow((external_id, row.group_id, row.confidence))
def similar(self, rows, rows2=None, row_limit=None, result_out=None):
'''
Compute pairwise similarity scores for all rows
Inputs:
rows - a list of data rows or dicts
rows2 - a optional second list of data rows or dicts
Outputs:
A csv file whose rows and columns correspond to the input rows,
with entry ij giving the similarity score between row i
and row j.
'''
rows = map(self.encode_row, rows)
if rows2 is not None:
rows2 = map(self.encode_row, rows2)
else:
rows2 = rows
with csv_output(result_out) as writer:
self._similar(rows, rows2, row_limit, writer)
return writer.result()
def _similar(self, update_rows, score_rows, row_limit, writer):
score_ids = set()
update_row_results = []
for update_row in update_rows:
results = self._query_server.score_derivative(
update_row,
score_rows,
row_limit=row_limit)
results_dict = dict(results)
update_row_results.append(results_dict)
score_ids = score_ids.union(set(results_dict.keys()))
for results in update_row_results:
writer.writerow([results[_id] for _id in score_ids])
def search(self, row, row_limit=None, result_out=None):
'''
Find the top n most similar rows to `row` in the dataset.
Inputs:
row - a data row or dict
Outputs:
A csv file with columns row_id, score, showing the
most similar rows in the dataset (up to `row_limit`), sorted by score
'''
row = self.encode_row(row)
with csv_output(result_out) as writer:
self._search(row, row_limit, writer)
return writer.result()
def _search(self, row, row_limit, writer):
results = self._query_server.score_derivative(
row,
score_rows=None,
row_limit=row_limit)
# FIXME map through erf
writer.writerow(('row_id', 'score'))
for row_id, score in results:
external_id = self.rowid_map[row_id]
writer.writerow((external_id, score))
def cluster(
self,
rows_to_cluster=None,
seed_rows=None,
cluster_count=None,
nearest_neighbors=10):
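# Overview of the approach: sample seed rows from the posterior when none
# are given, score pairwise similarity among the seeds, exponentiate the
# clipped scores into an affinity matrix for spectral clustering, then
# label each row_to_cluster by a majority vote over its most similar
# seed rows.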
if seed_rows is None:
seed_rows = self._query_server.sample(
[None for _ in self.feature_names],
sample_count=SAMPLE_COUNT)
row_limit = len(seed_rows) ** 2 + 1
similar_string = StringIO(self.similar(seed_rows, row_limit=row_limit))
similar = numpy.genfromtxt(
similar_string,
delimiter=',',
skip_header=0)
similar = similar.clip(0., 5.)
similar = numpy.exp(similar)
clustering = SpectralClustering(
n_clusters=cluster_count,
affinity='precomputed')
labels = clustering.fit_predict(similar)
if rows_to_cluster is None:
return zip(labels, seed_rows)
else:
row_labels = []
for row in rows_to_cluster:
similar_scores = self.similar(
[row],
seed_rows,
row_limit=row_limit)
similar_scores = numpy.genfromtxt(
StringIO(similar_scores),
delimiter=',',
skip_header=0)
assert len(similar_scores) == len(labels)
label_scores = zip(similar_scores, labels)
top = sorted(label_scores, reverse=True)[:nearest_neighbors]
label_counts = Counter(zip(*top)[1]).items()
top_label = sorted(label_counts, key=lambda x: -x[1])[0][0]
row_labels.append(top_label)
return zip(row_labels, rows_to_cluster)
def normalize_mutual_information(mutual_info):
'''
Recall that mutual information
I(X; Y) = H(X) + H(Y) - H(X, Y)
satisfies:
I(X; Y) >= 0
I(X; Y) = 0 iff p(x, y) = p(x) p(y) # independence
Definition: Define the "relatedness" of X and Y by
r(X, Y) = sqrt(1 - exp(-2 I(X; Y)))
= sqrt(1 - exp(-I(X; Y))^2)
= sqrt(1 - exp(H(X,Y) - H(X) - H(Y))^2)
Theorem: Assume X, Y have finite entropy. Then
(1) 0 <= r(X, Y) < 1
(2) r(X, Y) = 0 iff p(x, y) = p(x) p(y)
(3) r(X, Y) = r(Y, X)
Proof: Abbreviate I = I(X; Y) and r = r(X, Y).
(1) Since I >= 0, exp(-2 I) in (0, 1], and r in [0, 1).
(2) r(X, Y) = 0 iff I(X; Y) = 0 iff p(x, y) = p(x) p(y)
(3) r is symmetric since I is symmetric. []
Theorem: If (X,Y) ~ MVN(mu, sigma_x, sigma_y, rho) in terms of
standard deviations and Pearson's correlation coefficient,
then r(X,Y) = rho.
Proof: The covariance matrix is
Sigma = [ sigma_x^2 sigma_x sigma_y rho ]
[ sigma_x sigma_y rho sigma_y^2 ]
with determinant det Sigma = sigma_x^2 sigma_y^2 (1 - rho^2).
The mutual information is thus
I(X;Y) = H(X) + H(Y) - H(X,Y)
= log(2 pi e)/2 + log sigma_x
+ log(2 pi e)/2 + log sigma_y
- log(2 pi e) - 1/2 log det Sigma
= -1/2 log (1 - rho^2)
= -log sqrt(1 - rho^2)
whence
r(X,Y) = sqrt(1 - exp(-I(X;Y)) ** 2)
= sqrt(1 - exp(-2 I(X;Y)))
= rho []
'''
mutual_info = max(mutual_info, 0) # account for roundoff error
r = (1.0 - math.exp(-2.0 * mutual_info)) ** 0.5
assert 0 <= r and r < 1, r
return r
def get_server(root, encoding=None, debug=False, profile=None, config=None):
query_server = loom.query.get_server(root, config, debug, profile)
return PreQL(query_server, encoding)
| bsd-3-clause |
dmitriz/zipline | tests/history_cases.py | 5 | 21413 | """
Test case definitions for history tests.
"""
import pandas as pd
import numpy as np
from zipline.finance.trading import TradingEnvironment, noop_load
from zipline.history.history import HistorySpec
from zipline.protocol import BarData
from zipline.utils.test_utils import to_utc
_cases_env = TradingEnvironment(load=noop_load)
def mixed_frequency_expected_index(count, frequency):
"""
Helper for enumerating expected indices for test_mixed_frequency.
"""
minute = MIXED_FREQUENCY_MINUTES[count]
if frequency == '1d':
return [_cases_env.previous_open_and_close(minute)[1], minute]
elif frequency == '1m':
return [_cases_env.previous_market_minute(minute), minute]
def mixed_frequency_expected_data(count, frequency):
"""
Helper for enumerating expected data for test_mixed_frequency.
"""
if frequency == '1d':
# First day of this test is July 3rd, which is a half day.
if count < 210:
return [np.nan, count]
else:
return [209, count]
elif frequency == '1m':
if count == 0:
return [np.nan, count]
else:
return [count - 1, count]
MIXED_FREQUENCY_MINUTES = _cases_env.market_minute_window(
to_utc('2013-07-03 9:31AM'), 600,
)
ONE_MINUTE_PRICE_ONLY_SPECS = [
HistorySpec(1, '1m', 'price', True, _cases_env, data_frequency='minute'),
]
DAILY_OPEN_CLOSE_SPECS = [
HistorySpec(3, '1d', 'open_price', False, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1d', 'close_price', False, _cases_env,
data_frequency='minute'),
]
ILLIQUID_PRICES_SPECS = [
HistorySpec(3, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(5, '1m', 'price', True, _cases_env, data_frequency='minute'),
]
MIXED_FREQUENCY_SPECS = [
HistorySpec(1, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(2, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(2, '1d', 'price', False, _cases_env, data_frequency='minute'),
]
MIXED_FIELDS_SPECS = [
HistorySpec(3, '1m', 'price', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'open_price', True, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1m', 'close_price', True, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1m', 'high', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'low', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'volume', True, _cases_env, data_frequency='minute'),
]
HISTORY_CONTAINER_TEST_CASES = {
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
'test one minute price only': {
# A list of HistorySpec objects.
'specs': ONE_MINUTE_PRICE_ONLY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 5,
'dt': to_utc('2013-06-21 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 6,
'dt': to_utc('2013-06-21 9:32AM'),
},
},
),
],
# Expected results
'expected': {
ONE_MINUTE_PRICE_ONLY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [5],
},
index=[
to_utc('2013-06-21 9:31AM'),
],
),
pd.DataFrame(
data={
1: [6],
},
index=[
to_utc('2013-06-21 9:32AM'),
],
),
],
},
},
'test daily open close': {
# A list of HistorySpec objects.
'specs': DAILY_OPEN_CLOSE_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'open_price': 10,
'close_price': 11,
'dt': to_utc('2013-06-21 10:00AM'),
},
},
),
BarData(
{
1: {
'open_price': 12,
'close_price': 13,
'dt': to_utc('2013-06-21 3:30PM'),
},
},
),
BarData(
{
1: {
'open_price': 14,
'close_price': 15,
# Wait a full market day before the next bar.
# We should end up with nans for Monday the 24th.
'dt': to_utc('2013-06-25 9:31AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
# open
DAILY_OPEN_CLOSE_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [10, np.nan, 14]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
# close
DAILY_OPEN_CLOSE_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 11]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 13]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [13, np.nan, 15]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
},
},
'test illiquid prices': {
# A list of HistorySpec objects.
'specs': ILLIQUID_PRICES_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 10,
'dt': to_utc('2013-06-28 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 11,
'dt': to_utc('2013-06-28 9:32AM'),
},
},
),
BarData(
{
1: {
'price': 12,
'dt': to_utc('2013-06-28 9:33AM'),
},
},
),
BarData(
{
1: {
'price': 13,
# Note: Skipping 9:34 to simulate illiquid bar/missing
# data.
'dt': to_utc('2013-06-28 9:35AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
ILLIQUID_PRICES_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [10, 11, 12],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
pd.DataFrame(
data={
1: [12, np.nan, 13],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
ILLIQUID_PRICES_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:57PM'),
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10, 11, 12],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
# The 12 value from 9:33 should be forward-filled.
pd.DataFrame(
data={
1: [10, 11, 12, 12, 13],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
},
},
'test mixed frequencies': {
# A list of HistorySpec objects.
'specs': MIXED_FREQUENCY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
'dt': to_utc('2013-07-03 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': count,
'dt': dt,
}
}
)
for count, dt in enumerate(MIXED_FREQUENCY_MINUTES)
],
# Dictionary mapping spec_key -> list of expected outputs.
'expected': {
MIXED_FREQUENCY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [count],
},
index=[minute],
)
for count, minute in enumerate(MIXED_FREQUENCY_MINUTES)
],
MIXED_FREQUENCY_SPECS[1].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1m'),
},
index=mixed_frequency_expected_index(count, '1m'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
],
MIXED_FREQUENCY_SPECS[2].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1d'),
},
index=mixed_frequency_expected_index(count, '1d'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
]
},
},
'test multiple fields and sids': {
# A list of HistorySpec objects.
'specs': MIXED_FIELDS_SPECS,
# Sids for the test.
'sids': [1, 10],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'dt': dt,
'price': count,
'open_price': count,
'close_price': count,
'high': count,
'low': count,
'volume': count,
},
10: {
'dt': dt,
'price': count * 10,
'open_price': count * 10,
'close_price': count * 10,
'high': count * 10,
'low': count * 10,
'volume': count * 10,
},
},
)
for count, dt in enumerate([
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
# NOTE: No update for 9:34
to_utc('2013-06-28 9:35AM'),
])
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': dict(
# Build a dict from a list of tuples. Doing it this way because
# there are two distinct cases we want to test: forward-fillable
# fields and non-forward-fillable fields.
[
(
# Non forward-fill fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, np.nan, 3],
10: [20, np.nan, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
# For volume, when we are missing data, we replace
                            # it with 0s to show that no trades occurred.
).fillna(0 if 'volume' in key else np.nan),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field not in HistorySpec.FORWARD_FILLABLE]
] +
# Concatenate the expected results for non-ffillable with
# expected result for ffillable.
[
(
# Forward-fillable fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, 2, 3],
10: [20, 20, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field in HistorySpec.FORWARD_FILLABLE]
]
),
},
}
| apache-2.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/backends/backend_pdf.py | 10 | 94712 | # -*- coding: iso-8859-1 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
import codecs
import os
import re
import sys
import time
import warnings
import zlib
import numpy as np
from six import unichr
from six import BytesIO
from datetime import datetime
from math import ceil, cos, floor, pi, sin
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" to your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
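# A minimal usage sketch (illustrative only; the figure contents and file
# name are arbitrary). The classes below are normally driven through the
# ordinary matplotlib figure-saving path rather than instantiated directly:
#
#     import matplotlib
#     matplotlib.use('pdf')            # select this backend
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1], [0, 1])
#     plt.savefig('figure.pdf')        # rendered via RendererPdf and PdfFile
#
# Setting matplotlib.rcParams['pdf.use14corefonts'] = True before saving
# switches to the non-embedded core-font path described above.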
# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
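# For example (a small illustration of the behaviour documented above):
#
#     fill([b'one', b'two', b'three', b'four'], linelen=10)
#
# returns b'one two\nthree four', since appending b'three' to the first line
# would exceed ten characters.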
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
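# For example (an illustration of the escaping performed here): applying
# _string_escape_regex.sub(_string_escape, b'a(b)\rc') yields
# b'a\\(b\\)\\rc' -- each parenthesis gains a backslash prefix and the
# carriage return byte is rewritten as the two-character escape \r.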
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (six.integer_types, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, six.text_type):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in six.iteritems(obj)])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
msg = "Don't know a PDF representation for %s objects." % type(obj)
raise TypeError(msg)
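# A few illustrative mappings (example values only; Name is defined below):
#
#     pdfRepr(True)                      -> b'true'
#     pdfRepr(4.0)                       -> b'4'
#     pdfRepr('Hello (world)')           -> b'(Hello \\(world\\))'
#     pdfRepr([1, 2, 3])                 -> b'[ 1 2 3 ]'
#     pdfRepr({'Type': Name('Catalog')}) -> b'<< /Type /Catalog >>'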
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + six.text_type(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in six.iteritems(_pdfops)]))
def _paint_path(closep, fillp, strokep):
"""Return the PDF operator to paint a path in the following way:
closep: close the path before painting
fillp: fill the path with the fill color
strokep: stroke the outline of the path with the line color"""
if strokep:
if closep:
if fillp:
return Op.close_fill_stroke
else:
return Op.close_stroke
else:
if fillp:
return Op.fill_stroke
else:
return Op.stroke
else:
if fillp:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
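# For example (illustrative): Op.paint_path(False, True, True) selects the
# fill-and-stroke operator (written as b'B'), Op.paint_path(True, False, True)
# selects close-and-stroke (b's'), and Op.paint_path(False, False, False)
# selects the path-discarding no-op b'n'.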
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header """
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None:
self.extra = dict()
else:
self.extra = extra
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression']:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
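# How Stream is driven in practice (a sketch based on the PdfFile methods
# below): PdfFile.beginStream() constructs a Stream, subsequent
# PdfFile.write()/output() calls are forwarded to Stream.write() (optionally
# through zlib compression), and PdfFile.endStream() calls Stream.end(), which
# writes the stream trailer and records the final length.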
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [[0, 65535, 'the zero object']]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = {'Type': Name('Catalog'),
'Pages': self.pagesObject}
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
self.type1Descriptors = {} # differently encoded Type-1 fonts may
# share the same descriptor
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
        # The PDF spec recommends including every procset
procsets = [Name(x)
for x in "PDF Text ImageB ImageC ImageI".split()]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = {'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets}
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = {'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [0, 0, 72 * width, 72 * height],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')}
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in six.itervalues(self.alphaStates)]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(six.itervalues(self.images))
for tup in six.itervalues(self.markers):
xobjects[tup[0]] = tup[1]
for name, value in six.iteritems(self.multi_byte_charprocs):
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked \
in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList)})
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill(list(map(pdfRepr, data))))
self.write(b'\n')
def beginStream(self, id, len, extra=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %r' % (Fx, filename),
'debug')
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in six.iteritems(self.fontNames):
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file;
# the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename,
self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = {'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding')}
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
msg = ('Embedding TeX font ' + texname + ' - fontinfo=' +
repr(fontinfo.__dict__))
matplotlib.verbose.report(msg, 'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [Name(ch) for ch in enc]
differencesArray = [0] + differencesArray
fontdict['Encoding'] = \
{'Type': Name('Encoding'),
'Differences': differencesArray}
# If no file is specified, stop short
if fontinfo.fontfile is None:
msg = ('Because of TeX configuration (pdftex.map, see updmap '
'option pdftexDownloadBase14) the font {0} is not '
'embedded. This is deprecated as of PDF 1.5 and it may '
'cause the consumer application to show something that '
'was not intended.').format(fontinfo.basefont)
warnings.warn(msg)
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0),
fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
# fixed width
if fixed_pitch:
flags |= 1 << 0
# TODO: serif
if 0:
flags |= 1 << 1
# TODO: symbolic (most TeX fonts are)
if 1:
flags |= 1 << 2
# non-symbolic
else:
flags |= 1 << 5
# italic
if italic_angle:
flags |= 1 << 6
# TODO: all caps
if 0:
flags |= 1 << 16
# TODO: small caps
if 0:
flags |= 1 << 17
# TODO: force bold
if 0:
flags |= 1 << 18
ft2font = FT2Font(fontfile)
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0})
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(filename)
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest:
return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0:
return floor(value)
else:
return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type': Name('Font'),
'BaseFont': ps_name,
'FirstChar': firstchar,
'LastChar': lastchar,
'FontDescriptor': fontdescObject,
'Subtype': Name('Type3'),
'Name': descriptor['FontName'],
'FontBBox': bbox,
'FontMatrix': [.001, 0, 0, .001, 0, 0],
'CharProcs': charprocsObject,
'Encoding': {
'Type': Name('Encoding'),
'Differences': differencesArray},
'Widths': widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed
# to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
s = decode_char(charcode)
width = font.load_char(
s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [get_char_width(charcode)
for charcode in range(firstchar, lastchar+1)]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
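            # For example (hypothetical character codes): glyphs for codes
            # 65, 66 and 68 named A, B and D produce the Differences array
            # [65, /A, /B, 68, /D]; a new starting code is emitted only when
            # the run of consecutive codes is broken.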
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(
filename.encode(sys.getfilesystemencoding()), glyph_ids)
charprocs = {}
for charname, stream in six.iteritems(rawcharprocs):
charprocDict = {'Length': len(stream)}
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type': Name('Font'),
'Subtype': Name('CIDFontType2'),
'BaseFont': ps_name,
'CIDSystemInfo': {
'Registry': 'Adobe',
'Ordering': 'Identity',
'Supplement': 0},
'FontDescriptor': fontdescObject,
'W': wObject,
'CIDToGIDMap': cidToGidMapObject
}
type0FontDict = {
'Type': Name('Font'),
'Subtype': Name('Type0'),
'BaseFont': ps_name,
'Encoding': Name('Identity-H'),
'DescendantFonts': [cidFontDictObject],
'ToUnicode': toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data:
break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
cmap = font.get_charmap()
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
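            # For example (hypothetical widths): entries [(65, 500),
            # (66, 520), (70, 480)] produce w = [65, [500, 520], 70, [480]]
            # and unicode_groups = [[65, 66], [70, 70]]; consecutive codes
            # share one starting code and a single list of widths.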
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
                             {'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
flags |= 1 << 0
if 0: # TODO: serif
flags |= 1 << 1
if symbolic:
flags |= 1 << 2
else:
flags |= 1 << 5
if sf & ITALIC:
flags |= 1 << 6
if 0: # TODO: all caps
flags |= 1 << 16
if 0: # TODO: small caps
flags |= 1 << 17
if 0: # TODO: force bold
flags |= 1 << 18
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': ps_name,
'Flags': flags,
'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
'Ascent': cvt(font.ascender, nearest=False),
'Descent': cvt(font.descender, nearest=False),
'CapHeight': cvt(pclt['capHeight'], nearest=False),
'XHeight': cvt(pclt['xHeight']),
'ItalicAngle': post['italicAngle'][1], # ???
'StemV': 0 # ???
}
# The font subsetting to a Type 3 font does not work for
        # OpenType (.otf) fonts that embed a PostScript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
msg = ("'%s' can not be subsetted into a Type 3 font. "
"The entire font will be embedded in the output.")
warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, {'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1]})
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
face, edge, hatch = hatch_style
if face is not None:
face = tuple(face)
if edge is not None:
edge = tuple(edge)
hatch_style = (face, edge, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in six.iteritems(self.hatchPatterns):
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = {'Procsets':
[Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
self.beginStream(
ob.id, None,
{'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res})
            # hatch_style is a tuple of
            # (stroke color, fill color, hatch pattern string)
rgb = hatch_style[0]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
if hatch_style[1] is not None:
rgb = hatch_style[1]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(0.1, Op.setlinewidth)
# TODO: We could make this dpi-dependent, but that would be
# an API change
self.output(*self.pathOperations(
Path.hatch(hatch_style[2]),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
self.beginStream(
ob.id, None,
{'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[(str('flags'), str('u1')),
(str('points'), str('>u4'), (2,)),
(str('colors'), str('u1'), (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
## These two from backend_ps.py
## TODO: alpha (SMask, p. 518 of pdf spec)
def _rgb(self, im):
h, w, s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:, :, :3]
a = rgba[:, :, 3:]
return h, w, rgb.tostring(), a.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(np.float32)
r = rgba_f[:, :, 0]
g = rgba_f[:, :, 1]
b = rgba_f[:, :, 2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def writeImages(self):
for img, pair in six.iteritems(self.images):
img.flipud_out()
if img.is_grayscale:
height, width, data = self._gray(img)
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8})
# TODO: predictors (i.e., output png)
self.currentstream.write(data)
self.endStream()
else:
height, width, data, adata = self._rgb(img)
smaskObject = self.reserveObject("smask")
self.beginStream(
smaskObject.id,
self.reserveObject('length of smask stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8})
# TODO: predictors (i.e., output png)
self.currentstream.write(adata)
self.endStream()
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
'SMask': smaskObject})
# TODO: predictors (i.e., output png)
self.currentstream.write(data)
self.endStream()
img.flipud_out()
def markerObject(self, path, trans, fillp, strokep, lw, joinstyle,
capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
        # mapping from (path operations, fill?, stroke?, joinstyle, capstyle) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
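        # A hypothetical entry, for illustration only:
        #     self.markers[(pathops, True, True, 'miter', 'butt')]
        #         == [Name('M0'), <Reference>, <Bbox>, 1.5]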
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fillp), bool(strokep), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fillp, strokep, joinstyle, capstyle),
(name, ob, bbox, lw)) in six.iteritems(self.markers):
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents)})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(False, fillp, strokep))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
padding, filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(False, filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
cmds = []
last_points = None
for points, code in path.iter_segments(transform, clip=clip,
simplify=simplify,
sketch=sketch):
if code == Path.MOVETO:
# This is allowed anywhere in the path
cmds.extend(points)
cmds.append(Op.moveto)
elif code == Path.CLOSEPOLY:
cmds.append(Op.closepath)
elif last_points is None:
# The other operations require a previous point
raise ValueError('Path lacks initial MOVETO')
elif code == Path.LINETO:
cmds.extend(points)
cmds.append(Op.lineto)
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
cmds.extend(points[2:])
cmds.append(Op.curveto)
elif code == Path.CURVE4:
cmds.extend(points)
cmds.append(Op.curveto)
last_points = points
return cmds
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name),
file=sys.stderr)
borken = True
else:
if name == 'the zero object':
key = "f"
else:
key = "n"
text = "%010d %05d %s \n" % (offset, generation, key)
self.write(text.encode('ascii'))
i += 1
if borken:
raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
is_date = lambda x: isinstance(x, datetime)
check_trapped = (lambda x: isinstance(x, Name) and
x.name in ('True', 'False', 'Unknown'))
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in six.iterkeys(self.infoDict):
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject}))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" %
self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
gc._fillcolor = fillcolor
orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta:
self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, six.string_types):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
        The pdf backend supports arbitrary scaling of images.
"""
return True
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
self.check_gc(gc)
h, w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.to_values()
self.file.output(Op.gsave,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
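        # For example (illustrative numbers): a 10-vertex path drawn 4 times
        # costs roughly 10 * 4 = 40 emitted in-line versus 10 + 5 + 4 = 19 as
        # a shared XObject, so the XObject wins; for a single use it never
        # does.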
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fillp = gc.fillp(rgbFace)
strokep = gc.strokep()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fillp, strokep, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if (x < 0 or
y < 0 or
x > self.file.width * 72 or
y > self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output(cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = six.next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
# We need to convert the glyph numbers to bytes, and the easiest
# way to do this on both Python 2 and 3 is .encode('latin-1')
seq += [['text', x1, y1,
[six.unichr(glyph).encode('latin-1')], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk with a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunk the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = six.text_type(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1
and chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show,
Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype),
Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode,
flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(filename)
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / 72.0, self.file.height / 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def strokep(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fillp(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def close_and_paint(self):
"""
Return the appropriate pdf operator to close the path and
cause it to be stroked, filled, or both.
"""
return Op.paint_path(True, self.fillp(), self.strokep())
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(False, self.fillp(), self.strokep())
capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._rgb, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
# must come first since may pop
(('_cliprect', '_clippath'), clip_cmd),
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
if (ours is None or theirs is None):
different = bool(not(ours is theirs))
else:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = (ours.shape != theirs.shape or
np.any(ours != theirs))
if different:
break
if different:
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
fillcolor = getattr(other, '_fillcolor', self._fillcolor)
effective_alphas = getattr(other, '_effective_alphas',
self._effective_alphas)
self._fillcolor = fillcolor
self._effective_alphas = effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
Notes
-----
In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
forgetting the format argument.
"""
__slots__ = ('_file', 'keep_empty')
def __init__(self, filename, keep_empty=True):
"""
Create a new PdfPages object.
Parameters
----------
filename: str
Plots using :meth:`PdfPages.savefig` will be written to a file at
this location. The file is opened at once and any older file with
the same name is overwritten.
keep_empty: bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
"""
self._file = PdfFile(filename)
self.keep_empty = keep_empty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
if (self.get_pagecount() == 0 and not self.keep_empty
and not self._file.passed_in_file_object):
os.remove(self._file.fh.name)
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure: :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf',
**kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
fixed_dpi = 72
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(
self.figure, width, height, image_dpi,
RendererPdf(file, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureCanvas = FigureCanvasPdf
FigureManager = FigureManagerPdf
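# --- Usage sketch (illustrative; not part of the original backend module) ---
# The classes above are normally reached indirectly: selecting the "pdf"
# backend makes pyplot route Figure.savefig() through FigureCanvasPdf.print_pdf.
# This is a minimal sketch of that path; the output name "sketch.pdf" is an
# arbitrary choice for the example.
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("pdf")  # must be called before importing pyplot
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9], label="y = x^2")
    ax.legend()
    fig.savefig("sketch.pdf")  # dispatches to FigureCanvasPdf.print_pdf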
| mit |
evan-magnusson/dynamic | Data/Calibration/Firm_Calibration_Python/data_structures/data_class.py | 6 | 10030 | '''
Data Structures (data_class.py):
-------------------------------------------------------------------------------
Last updated 6/24/2015
This module defines data structures in order to keep track of firm data that
is categorized by NAICS codes.
Dealing with this data is made particularly difficult by the levels of
detail that firms can be differentiated on.
Different data sources consequently have varying levels of specificity.
In order to deal with this, the module creates a *NAICS tree* data structure.
The :term:`NAICS tree` is a standard tree data structure with each node
corresponding to an NAICS industry.
The nodes are coded in as custom "industry" objects.
The industry object has a list of sub-industries and a custom pandas dataframes
object. The pandas dataframes object has a dictionary of pandas dataframes as
well as custom functions for maintaining it.
'''
# Packages:
import pandas as pd
import numpy as np
'''
-------------------------------------------------------------------------------
Objects defined here:
pd_dfs (pandas dataframes): A dictionary of pandas dataframes.
industry: A list of sub-industries, as well as a pd_dfs for pertinent data.
tree: A tree of industry objects. Has a root that aggregates all the
industries, as well as a list of all industries in the tree.
-------------------------------------------------------------------------------
class pd_dfs: Defines an object that contains a dictionary of pandas dataframes.
dfs: A dictionary of pandas dataframes, keyed by name.
-------------------------------------------------------------------------------
'''
class pd_dfs:
"""
This "pandas dataframes" object has one member: a dictionary of pandas
dataframes. The class has functions for reading in and maintaining this.
:param args: Data to initialize the dictionary with. This is either a
dictionary of pandas dataframes, or tuple/list of keys
alternated with pandas dataframes.
"""
def __init__(self, *args):
# Initialize the dictionary:
self.dfs = {}
self.append(args)
def append(self, *args):
""" Appending to the dictionary of pandas dataframe.
:param args: Data to be appendend. This is either a dictionary of
pandas dataframes, or tuple/list of keys alternated with
pandas dataframes.
"""
# *args may be nested in tuples as it goes through multiple functions:
while len(args) == 1 and isinstance(args[0], (list,tuple)):
args = args[0]
# If the input is a dictionary:
if len(args) > 0 and isinstance(args[0], dict):
for key in args[0]:
self.dfs[key] = args[0][key]
return None
# If the input is a list or tuple alternating between keys and pd_dfs:
for i in xrange(len(args)):
if isinstance(args[i], (list,tuple)):
self.dfs[args[i][0]] = args[i][1]
else:
if i%2 == 0:
self.dfs[args[i]] = args[i+1]
def delete(self, keys=None):
""" Deleting elements in dictionary of pandas dataframe.
:param keys: A list of keys to be deleted."""
for key in keys:
try:
del self.dfs[key]
except KeyError:
pass
class industry:
'''
This object represents an industry. It has a list of the NAICS codes of
the sub-industries as well as a pandas dataframes object.
:param sub_ind: A list of sub-industries of this industry.
:param args: Data to initialize the industry with. This is either a
dictionary of pandas dataframes, or tuple/list of keys
alternated with pandas dataframes.
'''
def __init__(self, sub_ind, *args):
self.sub_ind = sub_ind
# Initialize the data:
self.data = pd_dfs(args)
def append_dfs(self, *args):
''' Append data.
:param args: Data to append the industry with. This is either a
dictionary of pandas dataframes, or tuple/list of keys
alternated with pandas dataframes.
'''
self.data.append(args)
def delete_df(self, keys):
''' Delete data.
:param args: Keys corresponding to the dataframes to be deleted.
'''
self.data.delete(keys)
class tree:
"""
Defines a tree where each node is an industry. The tree has a root,
a list of all the industries, and a matching from each index of an industry
to the index of the corresponding parent.
:param path: The path of a csv file that has one column of NAICS codes.
.. note:: In the input csv file, industries with multiple NAICS codes
**must** separate the codes using periods (".").
Anything besides digits and periods will make the function crash.
.. note:: The input csv file must have "Codes:" as a header on the
first row of the first column.
:param root: An industry object corresponding to the aggregate of all the
industries. This should have a NAICS code of '1'.
:param enumeration: An enumeration of all the industries.
:param par: A matching from each index of an industry to the index of the
corresponding parent.
"""
def __init__(self, path="", root=None,
enum_inds=None, par=None):
if path != "":
self = self.load_naics(path)
else:
self.root = root
if enum_inds is None:
enum_inds = pd.DataFrame(np.zeros((0,0)))
self.enum_inds = [industry([]) for i in xrange(0,len(enum_inds))]
self.par = par
def append_all(self, df_nm, df_cols):
''' Appends a pandas dataframe to every industry in the tree.
This dataframe has dimensions 1xlen(df_cols), and corresponds to key
df_nm in the dataframes dictionary.
:param df_nm: The key under which the new dataframe is stored.
:param df_cols: The column labels of the new 1 x len(df_cols) dataframe.
'''
for i in self.enum_inds:
i.data.append((df_nm, pd.DataFrame(np.zeros((1, len(df_cols))),
columns=df_cols)))
def load_naics(self, path):
'''
This function takes a csv file that is a column of NAICS codes and
generates a *NAICS tree*.
:param path: The path of a csv file that has one column of NAICS codes.
.. note:: In the input csv file, industries with multiple NAICS codes
**must** separate the codes using periods (".") in the csv file.
Anything besides digits and periods will make the function crash.
.. note:: The input csv file must have "Codes:" as a header on the
first row of the first column.
'''
# Reading in a list of naics codes:
naics_codes = pd.read_csv(path).fillna(0)
rows = naics_codes.shape[0]
# Initializing the naics tree:
self.enum_inds = [industry([]) for i in xrange(0,rows)]
self.root = self.enum_inds[0]
self.par = [0]*rows
# Read the naics codes into the tree:
for i in xrange(0, rows):
cur_codes = pd.DataFrame(naics_codes.iloc[i,0].split("-"))
if(cur_codes.shape[0] == 2):
cur_codes = pd.DataFrame(range(int(cur_codes.iloc[0,0]),
int(cur_codes.iloc[1,0])+1))
self.enum_inds[i].append_dfs(("Codes:", cur_codes))
cur_rows = self.enum_inds[i].data.dfs["Codes:"].shape[0]
for j in xrange(0, cur_rows):
code = int(self.enum_inds[i].data.dfs["Codes:"].iloc[j,0])
self.enum_inds[i].data.dfs["Codes:"].iloc[j,0] = code
# Creating the tree structure:
# "levels" keeps track of the path from the root to the current industry.
levels = [None]
levels[0] = self.enum_inds[0]
levels_index = [0]
cur_lvl = 0
# Going through every industry in the tree and finding the parent/children:
for i in xrange(1,rows):
cur_ind = self.enum_inds[i]
cur_codes = cur_ind.data.dfs["Codes:"]
cur_rows = cur_codes.shape[0]
par_found = False
while not par_found:
prev_ind = levels[cur_lvl]
prev_codes = prev_ind.data.dfs["Codes:"]
prev_rows = prev_codes.shape[0]
for j in xrange(0, cur_rows):
for k in xrange(0, prev_rows):
if cur_lvl == 0:
# Then the industry's parent is the root.
par_found = True
cur_lvl += 1
levels.append(cur_ind)
levels_index.append(i)
levels[0].sub_ind.append(cur_ind)
self.par[i] = levels_index[cur_lvl-1]
break
elif str(prev_codes.iloc[k,0]) in str(cur_codes.iloc[j,0]):
# Then "levels[cur_lvl]" is the parent of "cur_ind":
par_found = True
cur_lvl += 1
levels.append(cur_ind)
levels_index.append(i)
prev_ind.sub_ind.append(cur_ind)
self.par[i] = levels_index[cur_lvl-1]
break
if(par_found):
break
if not par_found:
del levels[cur_lvl]
del levels_index[cur_lvl]
cur_lvl -= 1
return self
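# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how the structures above fit together. It assumes a
# hypothetical one-column csv file "naics_codes.csv" whose first row is the
# "Codes:" header, as described in the tree docstring; the dataframe name
# "FA" and its columns are arbitrary choices for this sketch.
if __name__ == "__main__":
    naics_tree = tree(path="naics_codes.csv")
    # Attach an empty 1 x 2 dataframe named "FA" to every industry node.
    naics_tree.append_all("FA", ["assets", "delta"])
    print("industries loaded: %d" % len(naics_tree.enum_inds))
    print(naics_tree.root.data.dfs["Codes:"])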
| mit |
rohanp/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
It samples the Fourier transform of the kernel characteristic
function at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
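# --- Usage sketch (illustrative; not part of scikit-learn itself) ---
# The typical workflow for these approximations: map the data with RBFSampler
# (or Nystroem), then fit a linear model on the transformed features. The
# dataset and hyperparameters below are arbitrary choices for the sketch.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier
    from sklearn.pipeline import make_pipeline
    X, y = make_classification(n_samples=500, n_features=20, random_state=0)
    feature_map = RBFSampler(gamma=0.2, n_components=300, random_state=0)
    clf = make_pipeline(feature_map, SGDClassifier(random_state=0))
    clf.fit(X, y)
    print("training accuracy: %.3f" % clf.score(X, y))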
| bsd-3-clause |
anntzer/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 11 | 25871 | """
Todo: cross-check the F-value with stats model
"""
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns_message
from sklearn.utils import safe_mask
from sklearn.datasets import make_classification, make_regression
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert np.allclose(f, f2)
assert np.allclose(pv, pv2)
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
# with centering, compare with sparse
F, pv = f_regression(X, y, center=True)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert (F > 0).all()
assert (pv > 0).all()
assert (pv < 1).all()
assert (pv[:5] < 0.05).all()
assert (pv[5:] > 1.e-4).all()
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert sparse.issparse(X_r2inv)
support_mask = safe_mask(X_r2inv, support)
assert X_r2inv.shape == X.shape
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert X_r2inv.getnnz() == X_r.getnnz()
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert X_selected.shape == (20, 0)
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_almost_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
with pytest.raises(ValueError):
SelectPercentile(percentile=-1).fit(X, y)
with pytest.raises(ValueError):
SelectPercentile(percentile=101).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='percentile', param=-1).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='percentile', param=101).fit(X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
assert np.sum(support[5:] == 1) < 3
def test_boundary_case_ch2():
# Test boundary case, and always aim to select 1 feature.
X = np.array([[10, 20], [20, 20], [20, 30]])
y = np.array([[1], [0], [0]])
scores, pvalues = chi2(X, y)
assert_array_almost_equal(scores, np.array([4., 0.71428571]))
assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))
filter_fdr = SelectFdr(chi2, alpha=0.1)
filter_fdr.fit(X, y)
support_fdr = filter_fdr.get_support()
assert_array_equal(support_fdr, np.array([True, False]))
filter_kbest = SelectKBest(chi2, k=1)
filter_kbest.fit(X, y)
support_kbest = filter_kbest.get_support()
assert_array_equal(support_kbest, np.array([True, False]))
filter_percentile = SelectPercentile(chi2, percentile=50)
filter_percentile.fit(X, y)
support_percentile = filter_percentile.get_support()
assert_array_equal(support_percentile, np.array([True, False]))
filter_fpr = SelectFpr(chi2, alpha=0.1)
filter_fpr.fit(X, y)
support_fpr = filter_fpr.get_support()
assert_array_equal(support_fpr, np.array([True, False]))
filter_fwe = SelectFwe(chi2, alpha=0.1)
filter_fwe.fit(X, y)
support_fwe = filter_fwe.get_support()
assert_array_equal(support_fwe, np.array([True, False]))
@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
@pytest.mark.parametrize("n_informative", [1, 5, 10])
def test_select_fdr_regression(alpha, n_informative):
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(100)])
assert alpha >= false_discovery_rate
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert false_discovery_rate > alpha / 10
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
assert np.sum(support[5:] == 1) < 2
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert X1.shape[1] == 1
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert X2.shape[1] == 2
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert Xt.shape == (2, 2)
assert 9998 not in Xt
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert Xt.shape == (2, 2)
assert 9998 not in Xt
def test_scorefunc_multilabel():
    # Test whether k-best and percentiles work with multilabels with chi2.
X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
y = [[1, 1], [0, 1], [1, 0]]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert Xt.shape == (3, 2)
assert 0 not in Xt
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert Xt.shape == (3, 2)
assert 0 not in Xt
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, k=2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
with pytest.raises(TypeError):
SelectFeatures(score_func=10).fit(X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
with pytest.raises(ValueError):
SelectKBest(k=-1).fit(X, y)
with pytest.raises(ValueError):
SelectKBest(k=4).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='k_best', param=-1).fit(X, y)
with pytest.raises(ValueError):
GenericUnivariateSelect(mode='k_best', param=4).fit(X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert X_selected.shape == (40, 0)
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
| bsd-3-clause |
msincenselee/vnpy | vnpy/data/stock/stock_base.py | 1 | 2934 | # flake8: noqa
"""
# Append / update basic stock information
"""
import os
import sys
import json
from typing import Any
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
import baostock as bs
from vnpy.trader.constant import Exchange
from vnpy.trader.utility import load_json, load_data_from_pkb2, save_data_to_pkb2
from vnpy.data.tdx.tdx_common import get_stock_type
stock_type_map = {
"1": '股票', "2": "指数", "3": "其他"
}
STOCK_BASE_FILE = 'stock_base.pkb2'
# get_stock_base return data format
# vt_symbol: {
# 'exchange': exchange code
# 'code': stock code
# 'name': Chinese name
# 'ipo_date': listing (IPO) date
# 'out_date': delisting date
# '类型': stock, index or other (in Chinese)
# 'type': stock_cn, index_cn,etf_cn,bond_cn,cb_cn
# 'status': '上市' (listed) or '退市' (delisted)
# }
def get_stock_base():
""" 获取股票基础信息"""
base_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), STOCK_BASE_FILE))
base_data = load_data_from_pkb2(base_file_name)
if base_data is None:
return update_stock_base()
else:
return base_data
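# Illustrative sketch (never called in this module): how the cached base data
# returned by get_stock_base() is typically consumed. The vt_symbol
# '600000.SSE' below is a hypothetical example, not a guarantee that it is
# present in the cache.
def _example_lookup():
    base = get_stock_base()
    info = base.get('600000.SSE')
    if info:
        print(f"{info['code']} {info['name']} type={info['type']} status={info['status']}")
    return info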
def update_stock_base():
"""
    Update basic stock information
:return:
"""
base_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), STOCK_BASE_FILE))
base_data = load_data_from_pkb2(base_file_name)
if base_data is None:
base_data = dict()
login_msg = bs.login()
if login_msg.error_code != '0':
        print(f'baostock login error code: {login_msg.error_code}, error message: {login_msg.error_msg}')
return base_data
rs = bs.query_stock_basic()
if rs.error_code != '0':
        print(f'baostock stock basic info query error code: {rs.error_code}, error message: {rs.error_msg}')
return
    # iterate the result rows and build the base_data dict
    print(f'Returned fields: {rs.fields}')
while (rs.error_code == '0') and rs.next():
row = rs.get_row_data()
exchange_code, stock_code = row[0].split('.')
exchange = Exchange.SSE if exchange_code == 'sh' else Exchange.SZSE
d = {
'exchange': exchange.value,
'code': stock_code,
'name': row[1],
'ipo_date': row[2],
'out_date': row[3],
'类型': stock_type_map.get(row[4], '其他'),
'type': get_stock_type(stock_code),
'status': '上市' if row[5] == '1' else '退市'
}
base_data.update({f'{stock_code}.{exchange.value}': d})
# print(f'{d}')
save_data_to_pkb2(base_data, base_file_name)
    print('Stock base update finished')
return base_data
if __name__ == '__main__':
update_stock_base()
| mit |
h-mayorquin/g_node_data_analysis_205 | 4_day/logistic_regresion.py | 1 | 1578 | import numpy as np
from load_data import X, Y
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
Y = Y.astype('int')
c_set = np.arange(0.1, 1, 0.1)
error_set = np.zeros(c_set.size)
error_set_l1 = np.zeros(c_set.size)
error_set_l2 = np.zeros(c_set.size)
penalities = ['l1', 'l2']
for penality in penalities:
for index, C in enumerate(c_set):
# We define the logistic regression
lg = LogisticRegression(penalty=penality, dual=False, tol=0.0001,
C=C, fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None)
# We fit the regresssion
lg.fit(X, Y)
        class_error = lg.score(X, Y)  # mean accuracy on the training data
if penality == 'l1':
error_set_l1[index] = class_error
if penality == 'l2':
error_set_l2[index] = class_error
# Plot the training accuracy as a function of C for each penalty
# (an illustrative cross-validation sketch for a true generalization estimate
# follows the 2D plot below)
plt.plot(c_set, error_set_l1, label='l1')
plt.plot(c_set, error_set_l2, label='l2')
plt.xlabel('C (inverse regularization strength)')
plt.ylabel('Fraction of correctly classified training data')
plt.legend()
plt.show()
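# Illustrative sketch (not executed): the scores above are computed on the
# training data, so they are optimistic. A true generalization estimate would
# use held-out data, e.g. cross-validation. The import path below depends on
# the scikit-learn version (sklearn.model_selection in newer releases,
# sklearn.cross_validation in older ones).
def _cross_validated_accuracy(C=1.0, penalty='l2'):
    from sklearn.model_selection import cross_val_score
    clf = LogisticRegression(penalty=penalty, C=C)
    return cross_val_score(clf, X, Y, cv=5).mean()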
# Plot in 3D
plot_3d = True
aux_x = np.arange(-5, 5, 0.1)
aux_y = np.arange(-5, 5, 0.1)
grid = np.meshgrid(aux_x, aux_y)
x = grid[0].ravel()
y = grid[1].ravel()
aux = np.vstack((x, y)).T
# z = lg.decision_function(aux)  # signed decision values (unused; probabilities are used instead)
z = lg.predict_proba(aux)
if plot_3d:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z[:, 0], rstride=10, cstride=10)
plt.show()
| bsd-2-clause |
pazagra/catkin_ws | src/Multimodal_Interaction/Obj_segment/Obj_Cand.py | 1 | 15760 | import numpy as np
import cv2
import timeit
import matplotlib.pyplot as plt
from skimage.segmentation import felzenszwalb,slic
from skimage.segmentation import mark_boundaries
from skimage.color import label2rgb
import multiprocessing
import random
import Obj_segment.Rect
path = "/media/iglu/Data/DatasetIglu"
u = ['user1', 'user2', 'user3', 'user4', 'user5', 'user6', 'user7', 'user8', 'user9', 'user10'] #
a = ['point_1', 'point_2', 'point_3', 'point_4', 'point_5', 'point_6', 'point_7', 'point_8', 'point_9', 'point_10',
'show_1', 'show_2', 'show_3', 'show_4', 'show_5', 'show_6', 'show_7', 'show_8', 'show_9', 'show_10']
homo=np.array([[ 1.00567306e+00 , -4.85118860e-01 , -1.84060385e+01],[ 2.23046547e-02 , 2.27148983e-03 , 1.80858908e+02],[ -1.17505053e-04, -1.38922057e-03 , 1.00000000e+00]])
homo= np.array([[ 9.94775973e-01 , -4.09621743e-01 , -4.37893262e+01],
[ -2.06444142e-02 , 2.43247181e-02 , 1.94859521e+02],
[ -1.13045909e-04 , -1.41217334e-03 , 1.00000000e+00]])
centro = (0,0)
angulo=0
def make_graph(grid):
# get unique labels
vertices = np.unique(grid)
# map unique labels to [1,...,num_labels]
reverse_dict = dict(zip(vertices, np.arange(len(vertices))))
grid2 = np.array([reverse_dict[x] for x in grid.flat]).reshape(grid.shape)
# create edges
down = np.c_[grid2[:-1, :].ravel(), grid2[1:, :].ravel()]
right = np.c_[grid2[:, :-1].ravel(), grid2[:, 1:].ravel()]
all_edges = np.vstack([right, down])
all_edges = all_edges[all_edges[:, 0] != all_edges[:, 1], :]
all_edges = np.sort(all_edges, axis=1)
num_vertices = len(vertices)
edge_hash = all_edges[:, 0] + num_vertices * all_edges[:, 1]
# find unique connections
edges = np.unique(edge_hash)
# undo hashing
edges = [[vertices[x % num_vertices],
vertices[x / num_vertices]] for x in edges]
return vertices, edges
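# Illustrative sketch (not called anywhere): the expected output of make_graph
# on a tiny hand-made label grid, easy to verify by hand.
def _make_graph_example():
    # Superpixel 0 occupies three cells and touches superpixel 1, so the only
    # adjacency edge is [0, 1].
    tiny = np.array([[0, 0],
                     [0, 1]])
    vertices, edges = make_graph(tiny)
    # vertices -> array([0, 1]); edges -> [[0, 1]]
    return vertices, edges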
def get_adj(N,segments,edges,img,n):
if N[0] == n:
N=N[1:]
Nei = np.unique([v for v in edges if v[0] in N or v[1] in N])
# print Nei
Imm = np.zeros((img.shape[0], img.shape[1]), np.uint8)
for x in xrange(len(N)):
sp = N[x]
if sp != n:
Imm[segments == sp] = 255
return Imm
def inside(img,seg,p1,p2):
D1 = img.copy()
D2 = img.copy()
D1 = D1[p1[1]:p2[1],p1[0]:p2[0]]
D2[D2 ==seg ] = 0
D2[D2 != 0] = -1
D2 +=1
D1[D1 ==seg ] = 0
D1[D1 != 0] = -1
D1 +=1
Sum1= np.sum(np.sum( np.array(D1)))
Sum2= np.sum(np.sum( np.array(D2)))
Sum3= (p2[1]-p1[1])*(p2[0]-p1[0])
# print seg
# print "% de SP: "+(Sum1*1.0/Sum2).__str__()+" S1: "+Sum1.__str__()
# print Sum3*0.85
if Sum1>int(0.30*Sum2) or Sum3*0.75<=Sum1:
return True
return False
def bbox3(img):
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return cmin, cmax, rmin, rmax
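# Illustrative sketch (not called anywhere): bbox3 returns the bounding box of
# the non-zero pixels as (cmin, cmax, rmin, rmax), i.e. column bounds first.
def _bbox3_example():
    mask = np.zeros((5, 6), np.uint8)
    mask[1:3, 2:5] = 255  # non-zero block spanning rows 1-2 and columns 2-4
    cmin, cmax, rmin, rmax = bbox3(mask)
    # cmin, cmax, rmin, rmax -> 2, 4, 1, 2
    return cmin, cmax, rmin, rmax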
def bbox2(img_sp,sp,p1,p2):
im = img_sp.copy()
# im = im[:,:,0]+im[:,:,1]+im[:,:,2]
for seg in sp:
if seg ==1:
continue
if inside(img_sp,seg,p1,p2):
# print "added"
im[im == seg] = 0
im[im!= 0] = -1
im+=1
im = np.array(im*255)
if np.sum(np.sum(im)) == 0:
return None,None,None,None
return bbox3(im)
def rotateImage(image, angle,center):
rot_mat = cv2.getRotationMatrix2D(center,angle,1.0)
result = cv2.warpAffine(image,rot_mat,image.shape[1::-1],flags=cv2.INTER_LINEAR)
# result = cv2.warpAffine(image, rot_mat, image.shape,flags=cv2.INTER_LINEAR)
return result
def Homo_get(x,y,inverted=False):
p1 = [float(x), float(y), 1.0]
p1 = np.array(p1)
if inverted:
r = np.dot(p1,np.linalg.inv(homo))
else:
r = np.dot(homo, p1)
r = r / r[2]
return r
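# Illustrative sketch (not called anywhere): Homo_get applies the global
# homography `homo` to a pixel coordinate and returns the normalised
# homogeneous vector, so (r[0], r[1]) is the mapped pixel in the other view.
# The input coordinate below is arbitrary and only for illustration.
def _homo_get_example():
    r = Homo_get(320, 240)
    # r[2] is always 1.0 after normalisation
    return int(r[0]), int(r[1])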
def get_images(img1, Mask1, img2, Mask2, name):
Mask_1 = Mask1.copy()
kernel = np.ones((7, 7), np.uint8)
Mask_1 = cv2.dilate(Mask_1, kernel, 1)
kernel = np.ones((4, 4), np.uint8)
Mask_1 = cv2.erode(Mask_1, kernel, 1)
edged = cv2.Canny(Mask_1,1,240)
total = np.sum(np.sum( np.array(Mask_1[:, :, 0])))
x_ini = 0
y_ini = 0
width = 0
height= 0
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cc = 0
ind = 0
indd= 0
for cnt in cnts:
if cc < cv2.contourArea(cnt):
ind = indd
cc=cv2.contourArea(cnt)
indd+=1
if cc <0.6*total:
return None,None,None
x, y, w, h = cv2.boundingRect(cnts[ind])
rect = cv2.minAreaRect(cnts[ind])
angle =rect[2]
Mask_1 = rotateImage(Mask_1, rect[2], (x + w / 2, y + h / 2))
p_c = (x + w / 2, y + h / 2)
img1 = rotateImage(img1, angle, p_c)
edged = cv2.Canny(Mask_1, 30, 200)
total = np.sum(np.sum( np.array(Mask_1[:, :, 0])))
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in cnts:
if cv2.contourArea(cnt) < 0.6 * total:
continue
x, y, w, h = cv2.boundingRect(cnt)
x_ini = x
y_ini = y
width = w
height = h
Mask_1 = Mask_1[y:y + h, x:x + w]
Mask_1[0:5,:,:]=255
img1 = img1[y:y + h, x:x + w].copy()
break
# cv2.imwrite(name + ".jpg", Mask_1)
Mask_1 = cv2.bitwise_not(Mask_1)
img1 = cv2.bitwise_and(img1,img1,mask=Mask_1[:,:,0])
# Mask_1 = rotateImage(Mask_1, -angle, p_c)
# cv2.imwrite(name + ".jpg",img1)
edged = cv2.Canny(Mask_1, 50, 200)
i = 0
img2 = img2[200:480,:,:]
Mask2 = Mask2[200:480,:,0]
kernel = np.ones((7, 7), np.uint8)
Mask2 = cv2.dilate(Mask2, kernel, 1)
kernel = np.ones((4, 4), np.uint8)
Mask2 = cv2.erode(Mask2, kernel, 1)
ret3, Mask2 = cv2.threshold(Mask2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Sup1 = cv2.bitwise_and(img2,img2,mask=Mask2)
Sup = cv2.cvtColor(Sup1,cv2.COLOR_BGR2RGB)
segments_fz = slic(Sup, n_segments=500, compactness=10)
segments_fz[Mask2!=255] = -1
segments_fz += 2
vert, edg = make_graph(segments_fz)
# Img_Slic = label2rgb(segments_fz,Sup,kind='avg')
# Img_Slic = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
Contornos=[]
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in cnts:
if cv2.contourArea(cnt) < 50:
continue
col = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
x, y, w, h = cv2.boundingRect(cnt)
if w > 320:
continue
cv2.rectangle(img1, (x,y),(x+w,y+h), col, 2)
r = Homo_get(x_ini +x,y_ini + y-20)
p1 = (min(int(r[0]),639),min(int(r[1])-200,279))
r = Homo_get(x_ini + x+w, y_ini + y +h-10)
p2 = (min(int(r[0]),639),min(int(r[1])-200,279))
if p1[0] < 0 or p1[1] <0 or p2[0] < 0 or p2[1] <0:
continue
sp = np.unique(np.array(segments_fz[p1[1]:p2[1],p1[0]:p2[0]]))
if len(sp) == 0:
None
elif sp[0] ==[1] and len(sp)==1:
print "Empty..."
else:
m = (p2[1]-p1[1])/2
mm = (p2[0]-p1[0])/2
sp = np.unique(np.array(segments_fz[p1[1]+m-10:p1[1]+m+10, p1[0]+mm-10:p2[0]+mm+10]))
# print sp
# img2[segments_fz!=sp]=0
Im = np.array(get_adj(sp, segments_fz, edg, Sup,1))
if np.sum(np.sum(Im))==0:
continue
# plt.imshow(Im)
# plt.show()
r1, r2, c1, c2 = bbox3(Im)
# masked_data = cv2.bitwise_and(masked_data, masked_data, mask=Im)
Contornos.append([[ r1, c1+200, r2, c2+200, x_ini +x, y_ini+y, x_ini +x+w, y_ini+y+h] ,i] )
# Output = Img_Slic[p1[1]:p2[1],p1[0]:p2[0]]
# print p
# cv2.rectangle(Img_Slic,(r1,c1),(r2,c2),col,2)
# cv2.imwrite(name+"_"+i.__str__()+".jpg",Img_Slic)
# cv2.rectangle(Mask_1, (x, y), (x + w, y + h), (0, 255, 0))
# img2 = img[y:y + h, x:x + w, :].copy()
# cv2.imwrite(name + "_" + i.__str__() + ".jpg", img2)
# print name + "_" + i.__str__() + ".jpg"
i+=1
# cv2.imwrite(name + ".jpg",img1)
# cv2.imwrite(name + "_s.jpg", Sup1)
# cv2.imwrite(name + "_Total.jpg", Img_Slic)
return Contornos,angle,p_c
def add_cnt(Cnts,cnt):
if len(Cnts)==0:
Cnts.append([cnt,1])
else:
done= False
for i in xrange(len(Cnts)):
P = Cnts[i][0][0]
p1 = cnt[0]
if abs(P[4]-p1[4])<=20 and abs(P[5]-p1[5])<=20 and abs(P[6]-p1[6])<=20 and abs(P[7]-p1[7])<=20:
Cnts[i][1]= Cnts[i][1] + 1
done=True
if not done:
Cnts.append([cnt,1])
def obtain_cand(Initial,Initial2,Nsme,user,action,Total):
TT = Initial[1].copy()
Rg = Initial[0].copy()
Output = []
kernel = np.ones((7, 7), np.uint8)
Mask2 = cv2.dilate(Initial2[1][:,:,0], kernel, 1)
kernel = np.ones((4, 4), np.uint8)
Mask2 = cv2.erode(Mask2, kernel, 1)
Mask2 = cv2.bitwise_not(Mask2)
kernel = np.ones((7, 7), np.uint8)
Mask1 = cv2.dilate(Initial2[0][:,:,0], kernel, 1)
kernel = np.ones((4, 4), np.uint8)
Mask1 = cv2.erode(Mask1, kernel, 1)
Mask1 = cv2.bitwise_not(Mask1)
Rg1 = cv2.bitwise_and(Rg,Rg,mask=Mask1)
Sup1 = cv2.bitwise_and(Initial[1],Initial[1],mask=Mask2)
Sup = cv2.cvtColor(Sup1, cv2.COLOR_BGR2RGB)
segments_fz = slic(Sup, n_segments=250, compactness=20, sigma=5)
segments_fz[Mask2 < 1] = -1
segments_fz += 2
# Img_Slic = label2rgb(segments_fz, Sup, kind='avg')
# Img_Slic_TT = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
# Img_Slic = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
for i in xrange(len(Total)):
col = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
T= Total[i][0][0]
x,y,x2,y2 = T[0],T[1],T[2],T[3]
cv2.rectangle(Rg1, (T[4], T[5]), (T[6],T[7]), col, 2)
P1 = Obj_segment.Rect.Point(T[4], T[5])
P2 = Obj_segment.Rect.Point(T[6],T[7])
Rec_top = Obj_segment.Rect.Rect(P1,P2)
sp = np.array(segments_fz[y:y2,x:x2])
sp = np.unique(sp)
if len(sp) == 0:
# Output =Img_Slic[y:y2,x:x2]
P1 = Obj_segment.Rect.Point(x,y)
P2 = Obj_segment.Rect.Point(x2,y2)
rec = Obj_segment.Rect.Rect(P1,P2)
        elif sp[0] == 1 and len(sp) == 1:
# Output = Img_Slic[y:y2, x:x2]
P1 = Obj_segment.Rect.Point(x, y)
P2 = Obj_segment.Rect.Point(x2, y2)
rec = Obj_segment.Rect.Rect(P1, P2)
else:
rmin, rmax,cmin, cmax = bbox2(segments_fz, sp,(x,y),(x2,y2))
if rmin is None:
continue
# Output = TT[cmin:cmax,rmin:rmax]
P1 = Obj_segment.Rect.Point(rmin, cmin)
P2 = Obj_segment.Rect.Point(rmax, cmax)
rec = Obj_segment.Rect.Rect(P1, P2)
Ouput_Top = Rg[T[5]:T[7],T[4]:T[6]]
Output.append((rec,Rec_top))
# cv2.imwrite("Morphed/Patches_Front/"+user+"_"+action+"_"+Nsme[:-4]+"_"+i.__str__()+"_Front.jpg",Output)
# cv2.imwrite("Morphed/Patches_Top/" + user + "_" + action + "_" + Nsme[:-4] + "_" + i.__str__() + "_Top.jpg", Ouput_Top)
# cv2.rectangle(Img_Slic_TT,(x,y),(x2,y2),col,3)
# cv2.imwrite("Morphed/Top/" + user + "_" + action + "_" + Nsme[:-4] + "_v2" + "_Top.jpg", Rg1)
# cv2.imwrite("Morphed/Front/"+user+"_"+action+"_"+Nsme[:-4]+"_v2"+ "_Front.jpg",Img_Slic_TT)
return Output
def get_candidate(Images):
def Clean_Output(Output):
def getKey(item):
area = abs(item[0].top-item[0].bottom)*abs(item[0].left-item[0].right)
return area
Out = []
Output = sorted(Output,key=getKey,reverse=True)
for i in xrange(len(Output)):
N=False
for j in xrange(i+1,len(Output)):
p1 = Output[i]
p1 = p1[1]
P = Output[j]
P = P[1]
if abs(P.bottom - p1.bottom) <= 20 and abs(P.top - p1.top) <= 20 and abs(P.left - p1.left) <= 20 and abs(P.right - p1.right) <= 20:
N = True
if not N:
Out.append(Output[i])
return Out
count = 0
Total = []
Total2 = []
Initial = []
Initial2 = []
for f in Images:
RGB1,Mask1,RGB2,Mask2 = f
R, angle, p_c = get_images(RGB1, Mask1, RGB2, Mask2,"")
if R == [] or R is None:
continue
RGB1 = rotateImage(RGB1, angle, p_c)
Mask1 = rotateImage(Mask1, angle, p_c)
Initial.append((RGB1, RGB2))
Initial2.append((Mask1, Mask2))
for K in xrange(len(R)):
add_cnt(Total, R[K])
if count > 5:
break
count += 1
removers = []
for i in xrange(len(Total)):
if Total[i][1] < 4:
removers.append(Total[i])
for i in xrange(len(removers)):
Total.remove(removers[i])
for i in xrange(len(Initial)):
Total2.append(obtain_cand(Initial[i], Initial2[i], "","","", Total))
Total = [item for sublist in Total2 for item in sublist]
Total = Clean_Output(Total)
return Total
def func(arg):
nu, na = arg
user = u[nu]
action = a[na]
print user
print action
count=0
Total = []
f = open(path + "/" + user + "/" + action + "/k2" + "/List.txt", 'r')
f2 = open(path + "/" + user + "/" + action + "/k1" + "/List.txt", 'r')
Initial = []
Initial2 = []
Nsme = []
for line in f:
Time = line
file1 = next(f).rstrip('\n')
file2 = next(f).rstrip('\n')
Label = next(f).rstrip('\n')
RGB1 = cv2.imread(path + "/" + user + "/" + action + "/k2" + "/RGB/" + file1)
Depth1 = np.load(path + "/" + user + "/" + action + "/k2" + "/Depth/" + file2)
Mask1 = cv2.imread(path + "/" + user + "/" + action + "/k2" + "/MTA/" + file1)
Time = next(f2).rstrip('\n')
file3 = next(f2).rstrip('\n')
file4 = next(f2).rstrip('\n')
Label = next(f2).rstrip('\n')
RGB2 = cv2.imread(path + "/" + user + "/" + action + "/k1" + "/RGB/" + file3)
Depth2 = np.load(path + "/" + user + "/" + action + "/k1" + "/Depth/" + file4)
Mask2 = cv2.imread(path + "/" + user + "/" + action + "/k1" + "/MTA/" + file3)
R,angle,p_c = get_images(RGB1, Mask1, RGB2,Mask2,"Morphed/"+user+"_"+action+"_"+file3[:-4])
if R is None:
continue
Nsme.append(file3)
RGB1= rotateImage(RGB1,angle,p_c)
Mask1 = rotateImage(Mask1,angle,p_c)
Initial.append((RGB1,RGB2))
Initial2.append((Mask1,Mask2))
for K in xrange(len(R)):
add_cnt(Total,R[K])
if count > 5:
break
count+=1
removers=[]
for i in xrange(len(Total)):
if Total[i][1] <4:
removers.append(Total[i])
for i in xrange(len(removers)):
Total.remove(removers[i])
for i in xrange(len(Initial)):
obtain_cand(Initial[i],Initial2[i],Nsme[i],user,action,Total)
return Total,user,action
def In_Patch():
start_time1 = timeit.default_timer()
# z = [func((aa, bb)) for aa in range(10) for bb in range(20)]
# print z
z = [(aa, bb) for aa in range(10) for bb in range(20)]
pool = multiprocessing.Pool(6)
R = pool.map(func, z,1)
pool.close()
pool.join()
Total = [item for sublist in R for item in sublist]
import joblib
joblib.dump(Total,"Values_chosen.pkl",compress=9)
elapsed = timeit.default_timer() - start_time1
print "Tiempo: " + elapsed.__str__()
| gpl-3.0 |
dsullivan7/scikit-learn | sklearn/linear_model/omp.py | 11 | 29952 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
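    Examples
    --------
    A minimal, illustrative call on random data (shapes and values below are
    arbitrary and only meant to show the calling convention):
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 20)
    >>> X /= np.sqrt(np.sum(X ** 2, axis=0))  # unit-norm columns
    >>> y = X[:, 0] - 2 * X[:, 5]
    >>> coef = orthogonal_mp(X, y, n_nonzero_coefs=2)
    >>> coef.shape
    (20,)
    >>> int(np.sum(coef != 0)) <= 2
    True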
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
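    Examples
    --------
    A minimal, illustrative sketch showing how the precomputed quantities are
    formed (random data, arbitrary shapes):
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp_gram
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 10)
    >>> y = rng.randn(30)
    >>> G = np.dot(X.T, X)
    >>> Xy = np.dot(X.T, y)
    >>> coef = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)
    >>> coef.shape
    (10,)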
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
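    Examples
    --------
    A minimal, illustrative fit on random data (shapes and values are
    arbitrary):
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuit
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 10)
    >>> y = X[:, 2] - 0.5 * X[:, 7]
    >>> reg = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(X, y)
    >>> reg.coef_.shape
    (10,)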
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
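    Examples
    --------
    A minimal, illustrative cross-validated fit on random data (shapes are
    arbitrary; the selected number of non-zero coefficients depends on the
    data):
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 8)
    >>> y = X[:, 1] + 2 * X[:, 4] + 0.1 * rng.randn(60)
    >>> reg = OrthogonalMatchingPursuitCV(cv=3).fit(X, y)
    >>> reg.coef_.shape
    (8,)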
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
| bsd-3-clause |
lbishal/scikit-learn | examples/classification/plot_lda.py | 142 | 2419 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
hamid-omid/search_relevance | spell_corrector.py | 1 | 1712 | '''
Spell checking the search_queries. It takes a rather long time to run;
suitable for over-the-night runs.
INPUT FILES:
train.csv (raw data file)
test.csv (raw data file)
OUTPUTS:
spell_corr.py
__Author__:
Ali Narimani
__Version__:
1.2
'''
import requests
import re
import time
from random import randint
import pandas as pd
# Reading input data:
train = pd.read_csv('../data/train.csv', encoding="ISO-8859-1")
test = pd.read_csv('../data/test.csv', encoding="ISO-8859-1")
START_SPELL_CHECK="<span class=\"spell\">Showing results for</span>"
END_SPELL_CHECK="<br><span class=\"spell_orig\">Search instead for"
HTML_Codes = (
("'", '''),
('"', '"'),
('>', '>'),
('<', '<'),
('&', '&'),
)
def spell_check(s):
q = '+'.join(s.split())
time.sleep( randint(0,2) ) #relax and don't make google angry
r = requests.get("https://www.google.co.uk/search?q="+q)
content = r.text
start=content.find(START_SPELL_CHECK)
if ( start > -1 ):
start = start + len(START_SPELL_CHECK)
end=content.find(END_SPELL_CHECK)
search= content[start:end]
search = re.sub(r'<[^>]+>', '', search)
for code in HTML_Codes:
search = search.replace(code[1], code[0])
search = search[1:]
else:
search = s
    return search
### start the spell_check :
data = ['train','test']
outfile = open("spell_corr.py", "w")
outfile.write('spell_check_dict={\n')
for df in data:
searches = eval(df)[10:20].search_term
for search in searches:
        spell_check_search = spell_check(search)
        if spell_check_search != search:
            outfile.write('"'+search+'"'+" : " + '"'+ spell_check_search+'"'+', \n')
outfile.write('}')
outfile.close()
# End of code
| mit |
UKPLab/sentence-transformers | examples/applications/clustering/agglomerative.py | 1 | 1733 | """
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering with a distance threshold
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i+1)
print(cluster)
print("")
| apache-2.0 |
lbishal/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
SSG-DRD-IOT/commercial-iot-security-system | opencv/tutorials/imageProcessing/transform/fourier.py | 1 | 4853 | """
Fourier Transform
-Find Fourier Transform of images using OpenCV
-utilize FFT functions in Numpy
-FT applications
functions:
    cv2.dft()
    cv2.idft()
FT used to analyze freq characteristics of filters
for images
2D Discrete Fourier Transform used to find frequency domain
FFT calculates DFT
sinusoidal signal: x(t)=A * sin(2 * \pi *f * t)
f - freq signal
if freq domain taken, can see a spike at f
if signal sampled to form discrete signal, get same freq domain, but periodic in range:
[- \pi , \pi] or [0, 2 * \pi] (or [0, N] for N-pt DFT)
consider image a signal sampled in 2 directions
taking FT in both X and Y dirs gives freq representation of image
for sinusoidal signal, if ampl varies fast in time -> hi freq signal
for images:
amplitude varies drastically at edge points or noises
therefore edges and noises high freq contents of image
no changes in amplitude: lo freq component
"""
# FT in Numpy
# numpy has FFT package
# np.fft.fft2 prov. freq transform which is complex array
# arguments:
# input image (grayscale)
# size of output array; if greater than size of input image, input image padded w/ 0s before calculation of FFT
# less than input image: input image cropped
# no args passes: output size same as input
# result: zero freq component @ top left corner
# to bring to center: shift result by N/2 in both directions
# done by np.fft.fftshift()
# once find frequency transform -> find magnitude spectrum
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# can see whiter region at center, showing low freq content is prominent
# ^ found freq transform; now, can do ops in freq domain
# hi pass filtering
# image reconstruction (ie find inverse DFT)
# remove lo freqs with rectangular window, size 60x60
# apply inverse shift using np.fft.ifftshift()
# so the DC component is again at the top left corner
# find inverse FFT using np.fft.ifft2()
# result complex #; take its abs value
rows, cols = img.shape
crow, ccol = rows/2, cols/2
fshift[crow-30:crow+30, ccol-30:ccol+30] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()
# don't use rectangular filters for masking
# create ripple-like ringing effects
# mask converted to sinc shape, causing problem
# use Gaussian window instead
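# Hedged sketch (added for illustration; not in the original notes): one way to build
# a smooth Gaussian low-pass mask instead of a hard rectangular window. The sigma
# value is an arbitrary choice for the demo.
_sigma = 20.0
_yy, _xx = np.ogrid[:rows, :cols]
_gauss_mask = np.exp(-((_xx - ccol) ** 2 + (_yy - crow) ** 2) / (2.0 * _sigma ** 2))
# Multiplying a centered (fftshift-ed) spectrum by _gauss_mask keeps low frequencies
# while rolling off smoothly, avoiding the ringing of a rectangular cut-off, e.g.:
# smooth = np.abs(np.fft.ifft2(np.fft.ifftshift(np.fft.fftshift(np.fft.fft2(img)) * _gauss_mask)))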
# Fourier Transform in OpenCV
# functions: cv2.dft() and cv2.idft()
# same result as before, but in 2 channels
# 1st channel: real part of result
# 2nd channel: imaginary part
# convert input image to np.float32 first
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
dft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
plt.subplot(121), plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# NOTE: use cv2.cartToPolar(), which returns both magnitude and phase
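# Small hedged example of the note above (added for clarity): cv2.cartToPolar()
# returns the magnitude and phase planes of the complex DFT output in one call.
_mag, _phase = cv2.cartToPolar(dft_shift[:, :, 0], dft_shift[:, :, 1])
# _mag matches cv2.magnitude(...) used above; _phase holds the per-pixel angle.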
# now, we do inverse DFT
# previously, we created HPF
# now, remove hi freq contents of image
# -> apply LPF
# blurs the image
# create a mask first with high value, 1, @ low freq
# ie pass LF content
# 0 at HF region
rows, cols = img.shape
crow, ccol = rows/2, cols/2
# create mask first, center square is 1, all remaining zeros
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# apply mask and iDFT
fshift = dft_shift * mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])
plt.subplot(121), plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_back, cmap = 'gray')
plt.title('Image after LPF'), plt.xticks([]), plt.yticks([])
plt.show()
| mit |
lbeltrame/bcbio-nextgen | bcbio/rnaseq/count.py | 2 | 2705 | """
count number of reads mapping to features of transcripts
"""
import os
import pandas as pd
from collections import defaultdict
import gffutils
from bcbio.log import logger
from bcbio.utils import file_exists
def combine_count_files(files, out_file=None, ext=".counts"):
"""
combine a set of count files into a single combined file
ext: remove this extension from the count files
"""
files = list(files)
files = [x for x in files if file_exists(x)]
if not files:
return None
col_names = [os.path.basename(x.replace(ext, "")) for x in files]
if not out_file:
out_dir = os.path.join(os.path.dirname(files[0]))
out_file = os.path.join(out_dir, "combined.counts")
if file_exists(out_file):
return out_file
logger.info("Combining count files into %s." % out_file)
row_names = []
col_vals = defaultdict(list)
for i, f in enumerate(files):
vals = []
if i == 0:
with open(f) as in_handle:
for line in in_handle:
if not line.strip().startswith("#"):
rname, val = line.strip().split("\t")
row_names.append(rname)
vals.append(val)
else:
with open(f) as in_handle:
for line in in_handle:
if not line.strip().startswith("#"):
try:
_, val = line.strip().split("\t")
except ValueError:
print(f, line)
raise
vals.append(val)
col_vals[col_names[i]] = vals
df = pd.DataFrame(col_vals, index=row_names)
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
def annotate_combined_count_file(count_file, gtf_file, out_file=None):
if not count_file:
return None
dbfn = gtf_file + ".db"
if not file_exists(dbfn):
return None
if not gffutils:
return None
db = gffutils.FeatureDB(dbfn, keep_order=True)
if not out_file:
out_dir = os.path.dirname(count_file)
out_file = os.path.join(out_dir, "annotated_combined.counts")
# if the genes don't have a gene_id or gene_name set, bail out
try:
symbol_lookup = {f['gene_id'][0]: f['gene_name'][0] for f in
db.features_of_type('exon')}
except KeyError:
return None
df = pd.io.parsers.read_csv(count_file, sep="\t", index_col=0, header=0)
df['symbol'] = df.apply(lambda x: symbol_lookup.get(x.name, ""), axis=1)
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
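# Hedged usage sketch (added for illustration; the file names below are hypothetical
# and not part of bcbio). The block only runs when this module is executed directly,
# never on import.
if __name__ == "__main__":
    _count_files = ["sample1.counts", "sample2.counts"]            # hypothetical inputs
    _combined = combine_count_files(_count_files, out_file="combined.counts")
    if _combined:
        annotate_combined_count_file(_combined, "annotation.gtf")  # hypothetical GTF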
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tools/merge.py | 9 | 44730 | """
SQL-style merge routines
"""
import numpy as np
from pandas.compat import range, lrange, lzip, zip, map, filter
import pandas.compat as compat
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
from pandas.core.generic import NDFrame
from pandas.core.series import Series
from pandas.core.index import (Index, MultiIndex, _get_combined_index,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util.decorators import Appender, Substitution
from pandas.core.common import ABCSeries, isnull
import pandas.core.common as com
import pandas.algos as algos
import pandas.hashtable as _hash
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
return op.get_result()
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
pass
def ordered_merge(left, right, on=None, left_by=None, right_by=None,
left_on=None, right_on=None,
fill_method=None, suffixes=('_x', '_y')):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> ordered_merge(A, B, fill_method='ffill', left_by='group')
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1
2 c 2 a 2
3 d 2 a 3
4 e 3 a 3
5 f 3 a 4
6 a 1 b NaN
7 b 1 b 1
8 c 2 b 2
9 d 2 b 3
10 e 3 b 3
11 f 3 b 4
Returns
-------
merged : DataFrame
        The output type will be the same as 'left', if it is a subclass
of DataFrame.
"""
def _merger(x, y):
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
# left_index=left_index, right_index=right_index,
suffixes=suffixes, fill_method=fill_method)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
if not isinstance(left_by, (list, tuple)):
left_by = [left_by]
pieces = []
for key, xpiece in left.groupby(left_by):
merged = _merger(xpiece, right)
for k in left_by:
# May have passed ndarray
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
return concat(pieces, ignore_index=True)
elif right_by is not None:
if not isinstance(right_by, (list, tuple)):
right_by = [right_by]
pieces = []
for key, ypiece in right.groupby(right_by):
merged = _merger(left, ypiece)
for k in right_by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
return concat(pieces, ignore_index=True)
else:
return _merger(left, right)
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError('indicator option can only accept boolean or string arguments')
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='merge')
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when data contains a column named {}".format(i))
if self.indicator_name in columns:
raise ValueError("Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] + result['_right_indicator']), categories=[1,2,3])
result[self.indicator_name] = result[self.indicator_name].cat.rename_categories(['left_only', 'right_only', 'both'])
result = result.drop(labels=['_left_indicator', '_right_indicator'], axis=1)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
# insert group keys
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
if name in result:
key_indexer = result.columns.get_loc(name)
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if len(self.left) == 0:
continue
na_indexer = (left_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
right_na_indexer = right_indexer.take(na_indexer)
result.iloc[na_indexer,key_indexer] = com.take_1d(self.right_join_keys[i],
right_na_indexer)
elif name in self.right:
if len(self.right) == 0:
continue
na_indexer = (right_indexer == -1).nonzero()[0]
if len(na_indexer) == 0:
continue
left_na_indexer = left_indexer.take(na_indexer)
result.iloc[na_indexer,key_indexer] = com.take_1d(self.left_join_keys[i],
left_na_indexer)
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
if name is None:
name = 'key_%d' % i
# a faster way?
key_col = com.take_1d(self.left_join_keys[i], left_indexer)
na_indexer = (left_indexer == -1).nonzero()[0]
right_na_indexer = right_indexer.take(na_indexer)
key_col.put(na_indexer, com.take_1d(self.right_join_keys[i],
right_na_indexer))
result.insert(i, name, key_col)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index:
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort, how=self.how)
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_data(self):
"""
Handles overlapping column names etc.
"""
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(
ldata.items, lsuf, rdata.items, rsuf)
if not llabels.equals(ldata.items):
ldata = ldata.copy(deep=False)
ldata.set_axis(0, llabels)
if not rlabels.equals(rdata.items):
rdata = rdata.copy(deep=False)
rdata.set_axis(0, rlabels)
return ldata, rdata
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
self._validate_specification()
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(left)
is_rkey = lambda x: isinstance(x, (np.ndarray, ABCSeries)) and len(x) == len(right)
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
right_keys.append(right[rk]._values)
join_names.append(rk)
else:
if not is_rkey(rk):
right_keys.append(right[rk]._values)
if lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
left_keys.append(left[lk]._values)
join_names.append(lk)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if (self.on is None and self.left_on is None
and self.right_on is None):
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
if not common_cols.is_unique:
raise MergeError("Data columns not unique: %s"
% repr(common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass on OR left_on and '
'right_on')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
"""
Parameters
----------
Returns
-------
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip( * map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = {'sort':sort} if how == 'left' else {}
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
def __init__(self, left, right, on=None, by=None, left_on=None,
right_on=None, axis=1, left_index=False, right_index=False,
suffixes=('_x', '_y'), copy=True,
fill_method=None):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, axis=axis,
left_index=left_index,
right_index=right_index,
how='outer', suffixes=suffixes,
sort=True # sorts when factorizing
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = algos.ffill_indexer(left_indexer)
right_join_indexer = algos.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='ordered_merge')
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip( * map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there already was any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return algos.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = \
algos.left_outer_join(com._ensure_int64(left_key),
com._ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = algos.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': algos.inner_join,
'left': algos.left_outer_join,
'right': _right_outer_join,
'outer': algos.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
if com.is_datetime64tz_dtype(lk) and com.is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
if com.is_int_or_datetime_dtype(lk) and com.is_int_or_datetime_dtype(rk):
klass = _hash.Int64Factorizer
lk = com._ensure_int64(com._values_from_object(lk))
rk = com._ensure_int64(com._values_from_object(rk))
else:
klass = _hash.Factorizer
lk = com._ensure_object(lk)
rk = com._ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
sorter = uniques.argsort()
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
new_left = reverse_indexer.take(com._ensure_platform_int(left))
np.putmask(new_left, left == -1, -1)
new_right = reverse_indexer.take(com._ensure_platform_int(right))
np.putmask(new_right, right == -1, -1)
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
from pandas.core.groupby import _int64_overflow_possible
# how many levels can be done without overflow
pred = lambda i: not _int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
#----------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes. Can also add a layer of hierarchical indexing on the
concatenation axis, which may be useful if the labels are the same (or
overlapping) on the passed axis number
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0, 1, ...}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note that the index values on the other
axes are still respected in the join.
copy : boolean, default True
If False, do not copy data unnecessarily
Notes
-----
The keys, levels, and names arguments are all optional
Returns
-------
concatenated : type of objects
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
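# Hedged illustration (added for clarity; not part of pandas): a tiny guarded demo of
# the `keys` argument described above, which builds a hierarchical index on the
# concatenation axis. It never runs on import.
if __name__ == '__main__':
    _left = DataFrame({'a': [1, 2]})
    _right = DataFrame({'a': [3, 4]})
    print(concat([_left, _right], keys=['first', 'second']))
    # the result's index is a MultiIndex with outer level ['first', 'second']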
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = [obj for obj in objs if obj is not None]
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
keys = clean_keys
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
raise TypeError("cannot concatenate a non-NDFrame object")
# consolidate
obj.consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
        # want the highest ndim that we have, and it must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
# filter out the empties
            # if we have no multi-index possibilities
df = DataFrame([ obj.shape for obj in objs ]).sum(1)
non_empties = df[df!=0]
if len(non_empties) and (keys is None and names is None and levels is None and join_axes is None):
objs = [ objs[i] for i in non_empties.index ]
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim-1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj,'name',None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({ name : obj })
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
new_data = com._concat_compat([x._values for x in self.objs])
name = com._consensus_name_attr(self.objs)
return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
index, columns = self.new_axes
tmpdf = DataFrame(data, index=index)
                # checks if the columns variable already stores valid column names (because they were
                # set via the 'keys' argument in the 'concat' call); if not, use the series names as column names
if columns.equals(Index(np.arange(len(self.objs)))) and not self.ignore_index:
columns = np.array([ data[i].name for i in range(len(data)) ], dtype='object')
indexer = isnull(columns)
if indexer.any():
columns[indexer] = np.arange(len(indexer[indexer]))
tmpdf.columns = columns
return tmpdf.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be "
"equal to {0}".format(ndim - 1))
# ufff...
indices = lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
if self._is_series:
all_indexes = [x.index for x in self.objs]
else:
try:
all_indexes = [x._data.axes[i] for x in self.objs]
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
return _get_combined_index(all_indexes, intersect=self.intersect)
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = Index(np.arange(len(self.objs)))
idx.is_unique = True # arange is always unique
return idx
elif self.keys is None:
names = []
for x in self.objs:
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
if x.name is not None:
names.append(x.name)
else:
idx = Index(np.arange(len(self.objs)))
idx.is_unique = True
return idx
return Index(names)
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = Index(np.arange(sum(len(i) for i in indexes)))
idx.is_unique = True
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
levels = [Categorical.from_array(zp, ordered=True).categories for zp in zipped]
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
factor = Categorical.from_array(concat_index, ordered=True)
levels.append(factor.categories)
label_list.append(factor.codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([ i.nlevels for i in indexes ])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
def _should_fill(lname, rname):
if not isinstance(lname, compat.string_types) or not isinstance(rname, compat.string_types):
return True
return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| gpl-2.0 |
sci-wms/sci-wms | wms/tests/test_ugrid.py | 2 | 9966 | # -*- coding: utf-8 -*-
from copy import copy
from django.test import TestCase
import pandas as pd
from wms.tests import add_server, add_group, add_user, add_dataset, image_path
from wms.models import Dataset, UGridDataset
from wms import logger # noqa
import pytest
xfail = pytest.mark.xfail
class TestUgrid(TestCase):
@classmethod
def setUpClass(cls):
add_server()
add_group()
add_user()
add_dataset("ugrid_testing", "ugrid", "selfe_ugrid.nc")
@classmethod
def tearDownClass(cls):
d = Dataset.objects.get(slug="ugrid_testing")
d.delete()
def setUp(self):
self.dataset_slug = 'ugrid_testing'
self.url_params = dict(
service = 'WMS',
request = 'GetMap',
version = '1.1.1',
layers = 'surface_salt',
format = 'image/png',
transparent = 'true',
height = 256,
width = 256,
srs = 'EPSG:3857',
bbox = '-13756219.106426599,5811660.1345785195,-13736651.227185594,5831228.013819524'
)
self.gfi_params = dict(
service = 'WMS',
request = 'GetFeatureInfo',
version = '1.1.1',
query_layers = 'surface_salt',
info_format = 'text/csv',
srs = 'EPSG:3857',
bbox = '-13756219.106426599,5811660.1345785195,-13736651.227185594,5831228.013819524',
height = 256,
width = 256,
x = 128, # middle
y = 128 # middle
)
self.gmd_params = dict(
service = 'WMS',
request = 'GetMetadata',
version = '1.1.1',
query_layers = 'surface_salt',
srs = 'EPSG:3857',
bbox = '-13756219.106426599,5811660.1345785195,-13736651.227185594,5831228.013819524',
height = 256,
width = 256
)
def image_name(self, fmt):
return '{}.{}'.format(self.id().split('.')[-1], fmt)
def test_identify(self):
d = Dataset.objects.get(name=self.dataset_slug)
klass = Dataset.identify(d.uri)
assert klass == UGridDataset
def do_test(self, params, fmt=None, write=True):
fmt = fmt or 'png'
response = self.client.get('/wms/datasets/{}'.format(self.dataset_slug), params)
self.assertEqual(response.status_code, 200)
outfile = image_path(self.__class__.__name__, self.image_name(fmt))
if write is True:
with open(outfile, "wb") as f:
f.write(response.content)
return outfile
def test_ugrid_default_styles(self):
params = copy(self.url_params)
self.do_test(params)
def test_ugrid_filledcontours(self):
params = copy(self.url_params)
params.update(styles='filledcontours_cubehelix')
self.do_test(params)
def test_ugrid_filledcontours_50(self):
params = copy(self.url_params)
params.update(styles='filledcontours_cubehelix', numcontours=50)
self.do_test(params)
def test_ugrid_pcolor(self):
params = copy(self.url_params)
params.update(styles='pcolor_cubehelix')
self.do_test(params)
def test_ugrid_pcolor_logscale(self):
params = copy(self.url_params)
params.update(styles='pcolor_cubehelix', logscale=True)
self.do_test(params)
@xfail(reason="facets is not yet implemeted for UGRID datasets")
def test_ugrid_facets(self):
params = copy(self.url_params)
params.update(styles='facets_cubehelix')
self.do_test(params)
def test_ugrid_contours(self):
params = copy(self.url_params)
params.update(styles='contours_cubehelix')
self.do_test(params)
def test_ugrid_contours_50(self):
params = copy(self.url_params)
params.update(styles='contours_cubehelix', numcontours=50)
self.do_test(params)
def test_ugrid_gfi_single_variable_csv(self):
params = copy(self.gfi_params)
r = self.do_test(params, fmt='csv')
df = pd.read_csv(r)
assert df['time'][0] == '2015-04-28 02:45:00'
assert df['x'][0] == -123.4863
assert df['y'][0] == 46.256
assert df['surface_salt'][0] == 0
def test_ugrid_gfi_single_variable_csv_4326(self):
params = copy(self.gfi_params)
params['srs'] = 'EPSG:4326'
params['bbox'] = '-123.57421875,46.1950421087,-123.3984375,46.3165841818'
r = self.do_test(params, fmt='csv')
df = pd.read_csv(r)
assert df['time'][0] == '2015-04-28 02:45:00'
assert df['x'][0] == -123.4863
assert df['y'][0] == 46.256
assert df['surface_salt'][0] == 0
def test_ugrid_gfi_single_variable_tsv(self):
params = copy(self.gfi_params)
params['info_format'] = 'text/tsv'
params['query_layers'] = 'surface_temp'
self.do_test(params, fmt='tsv')
def test_ugrid_gfi_single_variable_json(self):
params = copy(self.gfi_params)
params['info_format'] = 'application/json'
self.do_test(params, fmt='json')
def test_ugrid_getmetadata_minmax(self):
params = copy(self.gmd_params)
params['item'] = 'minmax'
self.do_test(params, fmt='json')
def test_getCaps(self):
params = dict(request='GetCapabilities')
self.do_test(params, write=False)
def test_create_layers(self):
d = Dataset.objects.get(name=self.dataset_slug)
assert d.layer_set.count() == 30
def test_delete_cache_signal(self):
d = add_dataset("ugrid_deleting", "ugrid", "selfe_ugrid.nc")
self.assertTrue(d.has_cache())
d.clear_cache()
self.assertFalse(d.has_cache())
class TestFVCOM(TestCase):
@classmethod
def setUpClass(cls):
add_server()
add_group()
add_user()
add_dataset("fvcom_testing", "ugrid", "fvcom_vectors.nc")
@classmethod
def tearDownClass(cls):
d = Dataset.objects.get(slug="fvcom_testing")
d.delete()
def setUp(self):
self.dataset_slug = 'fvcom_testing'
self.url_params = dict(
service = 'WMS',
request = 'GetMap',
version = '1.1.1',
layers = 'u,v',
format = 'image/png',
transparent = 'true',
height = 256,
width = 256,
srs = 'EPSG:3857',
bbox = '-10018754.171394622,2504688.5428486555,-8766409.899970293,3757032.814272983'
)
self.gfi_params = dict(
service = 'WMS',
request = 'GetFeatureInfo',
version = '1.1.1',
query_layers = 'u,v',
info_format = 'text/csv',
srs = 'EPSG:3857',
bbox = '-10018754.171394622,2504688.5428486555,-8766409.899970293,3757032.814272983',
height = 256,
width = 256,
x = 256, # Top right
y = 0 # Top right
)
self.gmd_params = dict(
service = 'WMS',
request = 'GetMetadata',
version = '1.1.1',
query_layers = 'u,v',
srs = 'EPSG:3857',
bbox = '-10018754.171394622,2504688.5428486555,-8766409.899970293,3757032.814272983',
height = 256,
width = 256
)
def image_name(self, fmt):
return '{}.{}'.format(self.id().split('.')[-1], fmt)
def test_identify(self):
d = Dataset.objects.get(name=self.dataset_slug)
klass = Dataset.identify(d.uri)
assert klass == UGridDataset
def do_test(self, params, fmt=None, write=True):
fmt = fmt or 'png'
response = self.client.get('/wms/datasets/{}'.format(self.dataset_slug), params)
self.assertEqual(response.status_code, 200)
outfile = image_path(self.__class__.__name__, self.image_name(fmt))
if write is True:
with open(outfile, "wb") as f:
f.write(response.content)
return outfile
def test_fvcom_filledcontours(self):
params = copy(self.url_params)
params.update(styles='filledcontours_cubehelix', layers='u')
self.do_test(params)
def test_fvcom_pcolor(self):
params = copy(self.url_params)
params.update(styles='pcolor_cubehelix', layers='u')
self.do_test(params)
@xfail(reason="facets is not yet implemeted for UGRID datasets")
def test_fvcom_facets(self):
params = copy(self.url_params)
params.update(styles='facets_cubehelix', layers='u')
self.do_test(params)
def test_fvcom_contours(self):
params = copy(self.url_params)
params.update(styles='contours_cubehelix', layers='u')
self.do_test(params)
def test_fvcom_gfi_single_variable_csv(self):
params = copy(self.gfi_params)
r = self.do_test(params, fmt='csv')
df = pd.read_csv(r, index_col='time')
assert df['x'][0] == -82.8046
assert df['y'][0] == 29.1632
assert df['u'][0] == -0.0467
assert df['v'][0] == 0.0521
def test_fvcom_vectorscale(self):
params = copy(self.url_params)
params['vectorscale'] = 10
params['styles'] = 'vectors_cubehelix'
self.do_test(params)
def test_fvcom_vectorstep(self):
params = copy(self.url_params)
params['vectorstep'] = 10
params['styles'] = 'vectors_cubehelix'
self.do_test(params)
def test_fvcom_getCaps(self):
params = dict(request='GetCapabilities')
self.do_test(params, write=False)
| gpl-3.0 |
larsmans/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
tmilicic/networkx | examples/drawing/sampson.py | 4 | 1389 | #!/usr/bin/env python
"""
Sampson's monastery data.
Shows how to read data from a zip file and plot multiple frames.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import zipfile, cStringIO
import networkx as nx
import matplotlib.pyplot as plt
zf = zipfile.ZipFile('sampson_data.zip') # zipfile object
e1=cStringIO.StringIO(zf.read('samplike1.txt')) # read info file
e2=cStringIO.StringIO(zf.read('samplike2.txt')) # read info file
e3=cStringIO.StringIO(zf.read('samplike3.txt')) # read info file
G1=nx.read_edgelist(e1,delimiter='\t')
G2=nx.read_edgelist(e2,delimiter='\t')
G3=nx.read_edgelist(e3,delimiter='\t')
pos=nx.spring_layout(G3,iterations=100)
plt.clf()
plt.subplot(221)
plt.title('samplike1')
nx.draw(G1,pos,node_size=50,with_labels=False)
plt.subplot(222)
plt.title('samplike2')
nx.draw(G2,pos,node_size=50,with_labels=False)
plt.subplot(223)
plt.title('samplike3')
nx.draw(G3,pos,node_size=50,with_labels=False)
plt.subplot(224)
plt.title('samplike1,2,3')
nx.draw(G3, pos, edgelist=list(G3.edges()), node_size=50, with_labels=False)
nx.draw_networkx_edges(G1,pos,alpha=0.25)
nx.draw_networkx_edges(G2,pos,alpha=0.25)
plt.savefig("sampson.png") # save as png
plt.show() # display
| bsd-3-clause |
vkuznet/rep | rep/metaml/stacking.py | 4 | 4905 | """
This module contains stacking strategies (meta-algorithms of machine learning).
"""
from __future__ import division, print_function, absolute_import
import numpy
from sklearn.base import clone
from ..estimators import Classifier
from ..estimators.utils import check_inputs, _get_features
__author__ = 'Alex Rogozhnikov'
class FeatureSplitter(Classifier):
"""
    Dataset is split by the values of `split_feature`;
    for each value of the feature, a new classifier is trained.
    When building predictions, each classifier predicts the events with
    the same value of `split_feature` it was trained on.
    :param str split_feature: the name of the key feature
    :param base_estimator: the classifier whose copies are trained on parts of the dataset
    :param list[str] features: list of columns the classifier uses
    .. note:: `split_feature` must be in the list of `features`
"""
def __init__(self, split_feature, base_estimator, train_features=None):
self.base_estimator = base_estimator
self.split_feature = split_feature
self.train_features = train_features
Classifier.__init__(self, features=self._features())
def _features(self):
if self.train_features is None:
return None
else:
return list(self.train_features) + [self.split_feature]
def _get_features(self, X, allow_nans=False):
"""
:param pandas.DataFrame X: train dataset
:return: pandas.DataFrame with used features
"""
split_column_values, _ = _get_features([self.split_feature], X, allow_nans=allow_nans)
split_column_values = numpy.ravel(numpy.array(split_column_values))
X_prepared, self.train_features = _get_features(self.train_features, X, allow_nans=allow_nans)
self.features = self._features()
return split_column_values, X_prepared
def fit(self, X, y, sample_weight=None):
"""
Fit dataset.
:param X: pandas.DataFrame of shape [n_samples, n_features] with features
:param y: array-like of shape [n_samples] with targets
        :param sample_weight: array-like of shape [n_samples] with event weights, or None.
:return: self
"""
if hasattr(self.base_estimator, 'features'):
            assert self.base_estimator.features is None, 'Base estimator must have None features! ' \
                                                          'Use the features parameter of FeatureSplitter instead'
X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=True)
# TODO cover the case of missing labels in subsets.
split_column_values, X = self._get_features(X)
self._set_classes(y)
self.base_estimators = {}
for value in numpy.unique(split_column_values):
rows = numpy.array(split_column_values) == value
base_classifier = clone(self.base_estimator)
if sample_weight is None:
base_classifier.fit(X.iloc[rows, :], y[rows])
else:
base_classifier.fit(X.iloc[rows, :], y[rows], sample_weight=sample_weight[rows])
self.base_estimators[value] = base_classifier
return self
def predict_proba(self, X):
"""
Predict probabilities. Each event will be predicted by the classifier
with trained on corresponding value of `split_feature`
:param X: pandas.DataFrame of shape [n_samples, n_features]
:return: probabilities of shape [n_samples, n_classes]
"""
split_column_values, X = self._get_features(X)
result = numpy.zeros([len(X), self.n_classes_])
for value, estimator in self.base_estimators.items():
mask = split_column_values == value
result[mask, :] = estimator.predict_proba(X.loc[mask, :])
return result
def staged_predict_proba(self, X):
"""
Predict probabilities after each stage of base classifier.
Each event will be predicted by the classifier
with trained on corresponding value of `split_feature`
:param X: pandas.DataFrame of shape [n_samples, n_features]
:return: iterable sequence of numpy.arrays of shape [n_samples, n_classes]
"""
split_column_values, X = self._get_features(X)
result = numpy.zeros([len(X), self.n_classes_])
masks_iterators = []
for value, estimator in self.base_estimators.items():
mask = split_column_values == value
prediction_iterator = estimator.staged_predict_proba(X.loc[mask, :])
masks_iterators.append([mask, prediction_iterator])
try:
while True:
for mask, prediction_iterator in masks_iterators:
result[mask, :] = next(prediction_iterator)
yield result
except StopIteration:
pass
| apache-2.0 |
hsiaoyi0504/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
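    # Minimal usage sketch (sizes and seed are illustrative; not part of the benchmark run):
    #
    #     rng = np.random.RandomState(0)
    #     V = np.abs(rng.standard_normal((200, 60)))
    #     W, H = alt_nnmf(V, r=10, tol=1e-3, R="svd")
    #     print(norm(V - np.dot(W, H)))              # Frobenius reconstruction error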
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
gokulvk99/ColorPrism | SvmColorizer.py | 1 | 14629 | from __future__ import division
import numpy as np
import cv
import cv2
import itertools
from sklearn.svm import SVC, LinearSVC
from sklearn import preprocessing
import pdb
import pygco
from scipy.cluster.vq import kmeans,vq
from sklearn.decomposition import PCA
import scipy.ndimage.filters
from datetime import datetime
import time
from threading import Thread
from Queue import Queue
import colorsys
from random import randint, uniform
from skimage.color import rgb2lab
#from numba import autojit
class SvmColorizer(object):
def __init__(self, ncolors=16, probability=False, npca=32, svmgamma=0.1, svmC=1, graphcut_lambda=1, ntrain=3000, selfcolor=False,
window_size = 10, surf_window_size = 20):
self.surf_window = surf_window_size
self.window_size = window_size
self.levels = int(np.floor(np.sqrt(ncolors)))
self.ncolors = ncolors
self.ntrain = ntrain
# declare classifiers
self.svm = [SVC(probability=probability, gamma=svmgamma, C=svmC) for i in range(self.ncolors)]
#self.svm = [LinearSVC() for i in range(self.ncolors)]
self.scaler = preprocessing.MinMaxScaler() # Scaling object -- Normalizes feature array
self.pca = PCA(npca)
self.centroids = []
self.probability = probability
self.colors_present = []
self.surf = cv2.DescriptorExtractor_create('SURF')
self.surf.setBool('extended', True) #use the 128-length descriptors
self.graphcut_lambda=graphcut_lambda
self.concurrent=200
self.selfcolor = selfcolor
print " ncolors: %d pca: %d ntrain:%d selfcolor:%s window: %d surf_window: %d"%(self.ncolors,npca, self.ntrain, self.selfcolor,
self.window_size, self.surf_window)
#self.setupQueue()
#self.numba_init()
def getMean(self, img, pos):
xlim = (max(pos[0] - self.window_size,0), min(pos[0] + self.window_size,img.shape[1]))
ylim = (max(pos[1] - self.window_size,0), min(pos[1] + self.window_size,img.shape[0]))
return np.mean(img[ylim[0]:ylim[1],xlim[0]:xlim[1]])
def getVariance(self, img, pos):
xlim = (max(pos[0] - self.window_size,0), min(pos[0] + self.window_size,img.shape[1]))
ylim = (max(pos[1] - self.window_size,0), min(pos[1] + self.window_size,img.shape[0]))
        return np.var(img[ylim[0]:ylim[1],xlim[0]:xlim[1]])/1000  # scaled variance (variance/1000), not a true standard deviation
def feature_surf(self, img, pos):
octave2 = cv2.GaussianBlur(img, (0, 0), 1)
octave3 = cv2.GaussianBlur(img, (0, 0), 2)
kp = cv2.KeyPoint(pos[0], pos[1], self.surf_window)
_, des1 = self.surf.compute(img, [kp])
_, des2 = self.surf.compute(octave2, [kp])
_, des3 = self.surf.compute(octave3, [kp])
return np.concatenate((des1[0], des2[0], des3[0]))
def feature_dft(self, img, pos):
xlim = (max(pos[0] - self.window_size,0), min(pos[0] + self.window_size,img.shape[1]))
ylim = (max(pos[1] - self.window_size,0), min(pos[1] + self.window_size,img.shape[0]))
patch = img[ylim[0]:ylim[1],xlim[0]:xlim[1]]
l = (2*self.window_size + 1)**2
#return all zeros for now if we're at the edge
if patch.shape[0]*patch.shape[1] != l:
return np.zeros(l)
return np.abs(np.fft(patch.flatten()))
#@autojit
def get_features(self, img, pos):
meanvar = np.array([self.getMean(img, pos), self.getVariance(img, pos)]) #variance is giving NaN
feat = np.concatenate((meanvar, self.feature_surf(img, pos), self.feature_dft(img, pos)))
return feat
def train(self, files):
features = []
self.local_grads = []
classes = []
kmap_a = []
kmap_b = []
# compute color map
for f in files:
print ("Training with " + f)
_,a,b = self.load_image(f)
kmap_a = np.concatenate([kmap_a, a.flatten()])
kmap_b = np.concatenate([kmap_b, b.flatten()])
startMillis = int(round(time.time() * 1000))
self.train_kmeans(kmap_a,kmap_b,self.ncolors)
endMillis = int(round(time.time() * 1000))
print (" K-Means (ms)" + str((endMillis - startMillis)))
for f in files:
l,a,b = self.load_image(f)
a,b = self.quantize_kmeans(a,b)
#dimensions of image
m,n = l.shape
startMillis = int(round(time.time() * 1000))
for i in xrange(self.ntrain):
#choose random pixel in training image
x = int(np.random.uniform(n))
y = int(np.random.uniform(m))
features.append(self.get_features(l, (x,y)))
classes.append(self.color_to_label_map[(a[y,x], b[y,x])])
#print ("Processing DONE " + f + " " + str(datetime.now()))
endMillis = int(round(time.time() * 1000))
print (" Training random pixes (ms)" + str((endMillis - startMillis)))
        # normalize the features and use PCA to reduce their dimensionality
self.features = self.scaler.fit_transform(np.array(features))
classes = np.array(classes)
self.features = self.pca.fit_transform(self.features)
for i in range(self.ncolors):
if len(np.where(classes==i)[0])>0:
curr_class = (classes==i).astype(np.int32)
#print("CURR i " + str(i) + " " + str(curr_class))
self.colors_present.append(i)
self.svm[i].fit(self.features,(classes==i).astype(np.int32))
return self
#@autojit
def input_image_feature_task(self, img, x, y, label_costs, skip, num_classes, count):
#innerLoopTime = int(round(time.time() * 1000))
if (0 == count % 10000):
print ("Processing "+str(count) + " " + str(datetime.now()));
feat = self.scaler.transform(self.get_features(img, (x,y)))
feat = self.pca.transform(feat)
#count += 1
# Hard-but-correct way to get g
# self.g[y-int(skip/2):y+int(skip/2)+1,x-int(skip/2):x+int(skip/2)+1] = self.color_variation(feat)
#get margins to estimate confidence for each class
for i in range(num_classes):
distance = self.svm[self.colors_present[i]].decision_function(feat)
cost = -1*self.svm[self.colors_present[i]].decision_function(feat)[0]
#print(" i " + str(i) + " COST "+str(cost) + " distance "+ str(distance))
label_costs[y-int(skip/2):y+int(skip/2)+1,x-int(skip/2):x+int(skip/2)+1,i] = cost
#def numba_init(self):
# self.savethread = pythonapi.PyEval_SaveThread
# self.savethread.argtypes = []
# self.savethread.restype = c_void_p
# self.restorethread = pythonapi.PyEval_RestoreThread
# self.restorethread.argtypes = [c_void_p]
# self.restorethread.restype = None
def doWork(self):
#print(" **** SETTING UP TASK " )
while True:
(img, x, y, label_costs, skip, num_classes, count) = self.queue.get()
self.input_image_feature_task(img, x, y, label_costs, skip, num_classes, count)
self.queue.task_done()
def setupQueue(self):
self.queue = Queue(self.concurrent * 4)
print("TASKS CREATED concurrent = "+str(self.concurrent))
for i in range(self.concurrent):
#print("TASKS CREATED "+str(i))
t = Thread(target=self.doWork)
t.daemon = True
t.start()
#print("TASKS CREATED ")
def enqueue(self, img, x, y, label_costs, skip, num_classes, count):
self.queue.put((img, x, y, label_costs, skip, num_classes, count))
#@autojit
def loop2d(self, img, n, m, skip, num_classes, label_costs):
count = 0
for x in xrange(0,n,skip):
for y in xrange(0,m,skip):
count = count+1
#self.enqueue(img, x, y, label_costs, skip, num_classes, count)
self.input_image_feature_task(img, x, y, label_costs, skip, num_classes, count)
def colorize(self, img, skip=4):
print "Skipping %d pixels" %(skip)
m,n = img.shape
num_classified = 0
_,raw_output_a,raw_output_b = cv2.split(cv2.cvtColor(cv2.merge((img, img, img)), cv.CV_RGB2Lab)) #default a and b for a grayscale image
output_a = np.zeros(raw_output_a.shape)
output_b = np.zeros(raw_output_b.shape)
num_classes = len(self.colors_present)
label_costs = np.zeros((m,n,num_classes))
self.g = np.zeros(raw_output_a.shape)
count=0
print("colorize() start =" + str(m) + ", n=" + str(n) + " Total iterations " + str(m/skip * n/skip)+ " at " + str(datetime.now()))
count=0
self.loop2d(img, n,m, skip, num_classes, label_costs)
#for x in xrange(0,n,skip):
#print("Coloring " + str(count) + " " + str(datetime.now()))
#fullInnerLoopTime = int(round(time.time() * 1000))
# for y in xrange(0,m,skip):
#self.input_image_feature_task(img, x, y, label_costs, skip, num_classes)
# count = count+1
#self.enqueue(img, x, y, label_costs, skip, num_classes, count)
# self.input_image_feature_task(img, x, y, label_costs, skip, num_classes, count)
#fulllInnerLoopEndTime = int(round(time.time() * 1000))
#print (" One Outer iteration time (secs)"+ str((fulllInnerLoopEndTime - fullInnerLoopTime)/1000))
#self.queue.join()
#edges = self.get_edges(img)
#self.g = np.sqrt(edges[0]**2 + edges[1]**2)
self.g = self.get_edges(img)
#self.g = np.log10(self.g)
print("input image features done for " + str(count) + " " + str(datetime.now()))
#postprocess using graphcut optimization
output_labels = self.graphcut(label_costs, l=self.graphcut_lambda)
print("graphcut done " + str(datetime.now()))
for i in range(m):
for j in range(n):
a,b = self.label_to_color_map[self.colors_present[output_labels[i,j]]]
output_a[i,j] = a
output_b[i,j] = b
output_img = cv2.cvtColor(cv2.merge((img, np.uint8(output_a), np.uint8(output_b))), cv.CV_Lab2RGB)
print("colors applied " + str(datetime.now()))
return output_img, self.g
def load_image(self, path):
img = cv2.imread(path)
#convert to L*a*b* space and split into channels
l, a, b = cv2.split(cv2.cvtColor(img, cv.CV_BGR2Lab))
if (self.selfcolor == True):
a = l
b = l
return l, a, b
def get_edges(self, img, blur_width=3):
img_blurred = cv2.GaussianBlur(img, (0, 0), blur_width)
vh = cv2.Sobel(img_blurred, -1, 1, 0)
vv = cv2.Sobel(img_blurred, -1, 0, 1)
#vh = vh/np.max(vh)
#vv = vv/np.max(vv)
#v = np.sqrt(vv**2 + vh**2)
v = 0.5*vv + 0.5*vh
return v
def graphcut(self, label_costs, l=100):
num_classes = len(self.colors_present)
print(" Label costs "+str(label_costs.shape) + " num_classes "+str(num_classes))
        #calculate pairwise potential costs (distance between color classes)
pairwise_costs = np.zeros((num_classes, num_classes))
for ii in range(num_classes):
for jj in range(num_classes):
c1 = np.array(self.label_to_color_map[ii])
c2 = np.array(self.label_to_color_map[jj])
pairwise_costs[ii,jj] = np.linalg.norm(c1-c2)
label_costs_int32 = (100*label_costs).astype('int32')
pairwise_costs_int32 = (l*pairwise_costs).astype('int32')
vv_int32 = (self.g).astype('int32')
vh_int32 = (self.g).astype('int32')
new_labels = pygco.cut_simple_vh(label_costs_int32, pairwise_costs_int32, vv_int32, vh_int32, n_iter=10, algorithm='swap')
print("NEW LABELS " + str(new_labels.shape))
return new_labels
# use golden ratio
def generateSelfLabColorsFromHsv(self, count):
golden_ratio_conjugate = 0.618033988749895
ignore = randint(0, 20)
for i in range(ignore):
h = randint(1,360) # use random start value
s = uniform(0.01,0.9)
v = uniform(0.01,0.9)
print(" IGNORE "+str(ignore) + " starting h "+ str(h) + " s "+ str(s) + " v " + str(v))
rgbColors = np.zeros((1, count, 3), dtype=np.float)
for i in range(count):
r,g,b = colorsys.hsv_to_rgb(h, s, v)
rgbColors[0, i] = [r,g,b]
h += golden_ratio_conjugate + 1
lab = rgb2lab(rgbColors)
_,a,b = cv2.split(lab)
#print("LABB " + str(lab) + "\naaa " + str(a.flatten()) + "\nbbbb " + str(b.flatten()))
labColors =np.column_stack((a.flatten(),b.flatten()))
#print("SHAPE " + str(labColors.shape) + "\nfinal " + str(labColors))
return labColors
def generateSelfLabColors(self, count):
a = np.ones((count), dtype=float)
b = np.ones((count), dtype=float)
for i in range(count):
a[i] = a[i] * randint(0,127)
b[i] = b[i] * randint(128,255)
labColors =np.column_stack((a.flatten(),b.flatten()))
#print("SHAPE " + str(labColors.shape) + "\nfinal " + str(labColors))
return labColors
def train_kmeans(self, a, b, k):
pixel = np.squeeze(cv2.merge((a.flatten(),b.flatten())))
if (self.selfcolor == True):
self.centroids = self.generateSelfLabColors(k)
else:
            self.centroids,_ = kmeans(pixel,k) # k color centroids will be found
qnt,_ = vq(pixel,self.centroids)
#print("CENTROIDS " + str(self.centroids))
#color-mapping lookup tables
self.color_to_label_map = {c:i for i,c in enumerate([tuple(i) for i in self.centroids])} #this maps the color pair to the index of the color
#print("color_to_label_map "+str(self.color_to_label_map))
self.label_to_color_map = dict(zip(self.color_to_label_map.values(),self.color_to_label_map.keys())) #takes a label and returns a,b
def quantize_kmeans(self, a, b):
w,h = np.shape(a)
# reshape matrix
pixel = np.reshape((cv2.merge((a,b))),(w * h,2))
# quantization
qnt,_ = vq(pixel,self.centroids)
# reshape the result of the quantization
centers_idx = np.reshape(qnt,(w,h))
clustered = self.centroids[centers_idx]
a_quant = clustered[:,:,0]
b_quant = clustered[:,:,1]
return a_quant, b_quant
| apache-2.0 |
youprofit/scikit-image | doc/examples/plot_medial_transform.py | 14 | 2220 | """
===========================
Medial axis skeletonization
===========================
The medial axis of an object is the set of all points having more than one
closest point on the object's boundary. It is often called the **topological
skeleton**, because it is a 1-pixel wide skeleton of the object, with the same
connectivity as the original object.
Here, we use the medial axis transform to compute the width of the foreground
objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``)
returns the distance transform in addition to the medial axis (with the keyword
argument ``return_distance=True``), it is possible to compute the distance to
the background for all points of the medial axis with this function. This gives
an estimate of the local width of the objects.
For a skeleton with fewer branches, there exists another skeletonization
algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes
a skeleton by iterative morphological thinnings.
"""
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import medial_axis
import matplotlib.pyplot as plt
def microstructure(l=256):
"""
Synthetic binary data: binary microstructure with blobs.
Parameters
----------
l: int, optional
linear size of the returned image
"""
n = 5
x, y = np.ogrid[0:l, 0:l]
mask = np.zeros((l, l))
generator = np.random.RandomState(1)
points = l * generator.rand(2, n**2)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndi.gaussian_filter(mask, sigma=l/(4.*n))
return mask > mask.mean()
data = microstructure(l=64)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax1.axis('off')
ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
ax2.contour(data, [0.5], colors='w')
ax2.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
| bsd-3-clause |
Myasuka/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jakobworldpeace/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
dl1ksv/gnuradio | gr-digital/examples/snr_estimators.py | 6 | 5753 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
try:
import scipy
from scipy import stats
except ImportError:
print("Error: Program requires scipy (www.scipy.org).")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires Matplotlib (matplotlib.sourceforge.net).")
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal with several estimators and compare the results.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
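# As a reminder, the second/fourth-order moment (M2M4) estimator implemented below can be
# summarized as (standard form for a constant-modulus signal in complex noise):
#
#     M2 = E[|y|^2],  M4 = E[|y|^4]
#     S_hat = sqrt(2*M2^2 - M4),  N_hat = M2 - S_hat,  SNR_hat = S_hat / N_hat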
def online_skewness(data):
n = 0
mean = 0
M2 = 0
M3 = 0
for n in range(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * n
mean = mean + delta_n
M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
return scipy.sqrt(len(data))*M3 / scipy.power(M2, 3.0 / 2.0);
def snr_est_simple(signal):
s = scipy.mean(abs(signal)**2)
n = 2*scipy.var(abs(signal))
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(signal.real)
#y4 = stats.skew(abs(signal.real))
skw = y4*y4 / (y2*y2*y2);
s = y1*y1
n = 2*(y3 + skw*s)
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in range(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0 / (float(N-1.0)))*ssum
mavg = (1.0 / (float(N-1.0)))*msum
beta = savg / (mavg - savg)
snr_rat = ((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=list(gr_estimators.keys()), default="simple",
help="Estimator type {0} [default=%default]".format(
list(gr_estimators.keys())))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits =2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
# 1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr / 10.0)
scale = scipy.sqrt(2*SNR)
yy = bits + n_cpx / scale
print("SNR: ", snr)
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx / scale)
snr0 = Sknown / Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(float(snr0dB))
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = blocks.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = channels.channel_model(1.0 / scale)
gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.plot(yy.real, yy.imag, 'o')
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
johannfaouzi/pyts | doc/conf.py | 1 | 11170 | # -*- coding: utf-8 -*-
#
# project-template documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'numpydoc',
'pytsdtwdoc',
'sphinx_gallery.gen_gallery',
]
# Use svg images for math stuff
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_show_class_members = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = 'True'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyts'
copyright = u'2017-2021, Johann Faouzi and all pyts contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pyts import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Custom style
#html_style = 'css/pyts.css'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Logo and description
'description': 'A Python Package for Time Series Classification',
'logo': 'img/logo.png',
'logo_name': 'false',
'logo_text_align': 'center',
# GitHub stuff
'github_banner': 'true',
'github_repo': 'pyts',
'github_type': 'star',
'github_user': 'johannfaouzi',
# Page and sidebar widths
'page_width': '1300px',
'body_max_width': '850px',
'sidebar_width': '250px',
# Related links
'show_related': 'true',
'show_relbar_bottom': 'true',
# Font sizes
'font_size': '15px',
'code_font_size': '13px'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/img/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Custom CSS files
html_css_files = [
'custom.css',
]
# HTML context
#html_context = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'pdflatex'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyts.tex', u'pyts Documentation',
u'Johann Faouzi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyts', u'pyts Documentation',
[u'Johann Faouzi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyts', u'pyts Documentation',
u'Johann Faouzi', 'pyts',
'A python package for time series transformation and classification',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'sklearn': ('https://scikit-learn.org/stable', None)
}
# sphinx-gallery configuration
sphinx_gallery_conf = {
'doc_module': 'pyts',
'backreferences_dir': os.path.join('generated'),
'reference_url': {
'pyts': None}
}
def setup(app):
app.add_stylesheet('custom.css')
app.add_javascript('js/copybutton.js')
# Filter Matplotlib warning
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/tests/test_common.py | 27 | 8388 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check, name, Estimator
else:
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators which have a max_iter
    # attribute report an n_iter of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check_transformer_n_iter, name, estimator
else:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# The ProjectedGradientNMF class is deprecated
if issubclass(Estimator, ProjectedGradientNMF):
with ignore_warnings():
yield check_get_params_invariance, name, Estimator
else:
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
pv/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
PanDAWMS/panda-server | pandaserver/configurator/db_interface.py | 1 | 14380 | #Standard python libraries
import sys
#Specific python libraries
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy import exc, func
#PanDA server libraries
from pandaserver.config import panda_config
from pandacommon.pandalogger.PandaLogger import PandaLogger
#Configurator libraries
from .models import Site, PandaSite, DdmEndpoint, Schedconfig, Jobsactive4, SiteStats, PandaDdmRelation
#Read connection parameters
__host = panda_config.dbhost
__user = panda_config.dbuser
__passwd = panda_config.dbpasswd
__dbname = panda_config.dbname
#Instantiate logger
_logger = PandaLogger().getLogger('configurator_dbif')
#Log the SQL produced by SQLAlchemy
__echo = False
#Create the SQLAlchemy engine
try:
if panda_config.backend == 'postgres':
if panda_config.dbport:
__host = '{}:{}'.format(__host, panda_config.dbport)
__engine = sqlalchemy.create_engine("postgresql://{}:{}@{}/{}".format(__user, __passwd, __host, __dbname),
echo=__echo, max_identifier_length=30)
else:
__engine = sqlalchemy.create_engine("oracle://%s:%s@%s"%(__user, __passwd, __host),
echo=__echo, max_identifier_length=30)
except exc.SQLAlchemyError:
_logger.critical("Could not load the DB engine: %s"%sys.exc_info())
raise
def get_session():
return sessionmaker(bind=__engine)()
def engine_dispose():
__engine.dispose()
# TODO: The performance of all write methods could be improved significantly by writing in bulk.
# The current implementation was the quickest way to get it done with the merge method while
# avoiding issues with duplicate keys.
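# A possible bulk-write variant for the methods below (an untested sketch; it relies on
# SQLAlchemy's Session.bulk_save_objects and would still need the duplicate-key handling
# that merge() currently provides):
#
#     objects = [Site(site_name=s['site_name'], role=s['role'], tier_level=s['tier_level'])
#                for s in sites_list]
#     session.bulk_save_objects(objects)
#     session.commit()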
def write_sites_db(session, sites_list):
"""
Cache the AGIS site information in the PanDA database
"""
try:
_logger.debug("Starting write_sites_db")
for site in sites_list:
_logger.debug("Site: {0}".format(site['site_name']))
session.merge(Site(site_name = site['site_name'],
role = site['role'],
tier_level = site['tier_level']))
session.commit()
_logger.debug("Done with write_sites_db")
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('write_sites_db: Could not persist information --> {0}'.format(sys.exc_info()))
def write_panda_sites_db(session, panda_sites_list):
"""
Cache the AGIS panda site information in the PanDA database
"""
_logger.debug("Starting write_panda_sites_db")
for panda_site in panda_sites_list:
try:
_logger.debug("panda_site: {0}".format(panda_site['panda_site_name']))
session.merge(PandaSite(panda_site_name = panda_site['panda_site_name'],
site_name = panda_site['site_name'],
default_ddm_endpoint = None,
storage_site_name = None,
is_local = None))
session.commit()
_logger.debug("Done with write_panda_sites_db")
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('write_panda_sites_db: Could not persist information --> {0}'.format(sys.exc_info()))
def write_ddm_endpoints_db(session, ddm_endpoints_list):
"""
Cache the AGIS ddm endpoints in the PanDA database
"""
try:
_logger.debug("Starting write_ddm_endpoints_db")
for ddm_endpoint in ddm_endpoints_list:
session.merge(DdmEndpoint(ddm_endpoint_name = ddm_endpoint['ddm_endpoint_name'],
site_name = ddm_endpoint['site_name'],
ddm_spacetoken_name = ddm_endpoint['ddm_spacetoken_name'],
type = ddm_endpoint['type'],
is_tape = ddm_endpoint['is_tape'],
blacklisted = ddm_endpoint['blacklisted'],
blacklisted_write=ddm_endpoint['blacklisted_write'],
blacklisted_read=ddm_endpoint['blacklisted_read'],
space_used = ddm_endpoint['space_used'],
space_free = ddm_endpoint['space_free'],
space_total = ddm_endpoint['space_total'],
space_expired = ddm_endpoint['space_expired'],
space_timestamp = ddm_endpoint['space_timestamp']
)
)
session.commit()
_logger.debug("Done with write_ddm_endpoints_db")
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('write_ddm_endpoints_db: Could not persist information --> {0}'.format(sys.exc_info()))
def write_panda_ddm_relation_db(session, relation_list):
"""
Store the relationship between Panda sites and DDM endpoints
"""
try:
_logger.debug("Starting write_panda_ddm_relation_db")
# Reset the relations. Important to do this inside the transaction
session.query(PandaDdmRelation).delete()
# Insert the relations
for ddm_endpoint_dict in relation_list:
session.merge(PandaDdmRelation(panda_site_name=ddm_endpoint_dict['panda_site_name'],
ddm_endpoint_name=ddm_endpoint_dict['ddm_site'],
roles=ddm_endpoint_dict['roles'],
is_local=ddm_endpoint_dict['is_local'],
order_read=ddm_endpoint_dict['order_read'],
order_write=ddm_endpoint_dict['order_write'],
default_read=ddm_endpoint_dict['default_read'],
default_write=ddm_endpoint_dict['default_write'],
scope = ddm_endpoint_dict['scope']
)
)
# Finish the transactions
session.commit()
_logger.debug("Done with write_panda_ddm_relation_db")
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('write_panda_ddm_relation_db: Could not persist information --> {0}'.format(sys.exc_info()))
def read_panda_ddm_relation_schedconfig(session):
"""
Read the PanDA - DDM relationships from schedconfig
"""
try:
_logger.debug("Starting read_panda_ddm_relation_schedconfig")
schedconfig = session.query(Schedconfig.site, Schedconfig.siteid, Schedconfig.ddm).all()
relationship_tuples = []
for entry in schedconfig:
site = entry.site
panda_site = entry.siteid
            # Schedconfig stores DDM endpoints as a comma separated string. Strip just in case
            ddm_endpoints = []
            if entry.ddm:
                ddm_endpoints = [ddm_endpoint.strip() for ddm_endpoint in entry.ddm.split(',')]
            # Return the tuples and let the caller combine them as needed
            relationship_tuples.append((site, panda_site, ddm_endpoints))
_logger.debug("Done with read_panda_ddm_relation_schedconfig")
return relationship_tuples
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_panda_ddm_relation_schedconfig excepted --> {0}'.format(sys.exc_info()))
return []
def read_configurator_sites(session):
"""
Read the site names from the configurator tables
"""
try:
_logger.debug("Starting read_configurator_sites")
site_object_list = session.query(Site.site_name).all()
site_set = set([entry.site_name for entry in site_object_list])
_logger.debug("Done with read_configurator_sites")
return site_set
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_configurator_sites excepted --> {0}'.format(sys.exc_info()))
return set()
def read_configurator_panda_sites(session):
"""
Read the panda site names from the configurator tables
"""
try:
_logger.debug("Starting read_configurator_panda_sites")
panda_site_object_list = session.query(PandaSite.panda_site_name).all()
panda_site_set = set([entry.panda_site_name for entry in panda_site_object_list])
_logger.debug("Done with read_configurator_panda_sites")
return panda_site_set
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_configurator_panda_sites excepted --> {0}'.format(sys.exc_info()))
return set()
def read_configurator_ddm_endpoints(session):
"""
Read the DDM endpoint names from the configurator tables
"""
try:
_logger.debug("Starting read_configurator_ddm_endpoints")
ddm_endpoint_object_list = session.query(DdmEndpoint.ddm_endpoint_name).all()
ddm_endpoint_set = set([entry.ddm_endpoint_name for entry in ddm_endpoint_object_list])
_logger.debug("Done with read_configurator_ddm_endpoints")
return ddm_endpoint_set
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_configurator_ddm_endpoints excepted --> {0}'.format(sys.exc_info()))
return set()
def read_schedconfig_sites(session):
"""
Read the site names from the schedconfig table
"""
try:
_logger.debug("Starting read_schedconfig_sites")
site_object_list = session.query(Schedconfig.site).all()
site_set = set([entry.site for entry in site_object_list])
_logger.debug("Done with read_schedconfig_sites")
return site_set
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_schedconfig_sites excepted --> {0}'.format(sys.exc_info()))
return set()
def read_schedconfig_panda_sites(session):
"""
Read the panda site names from the schedconfig table
"""
try:
_logger.debug("Starting read_schedconfig_panda_sites")
panda_site_object_list = session.query(Schedconfig.siteid).all()
panda_site_set = set([entry.siteid for entry in panda_site_object_list])
_logger.debug("Done with read_schedconfig_panda_sites")
return panda_site_set
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('read_schedconfig_panda_sites excepted --> {0}'.format(sys.exc_info()))
return set()
def update_storage(session, ddm_endpoint_name, rse_usage):
"""
Updates the storage of a DDM endpoint
"""
try:
_logger.debug("Starting update_storage for {0} with usage {1}".format(ddm_endpoint_name, rse_usage))
ddm_endpoint = session.query(DdmEndpoint).filter(DdmEndpoint.ddm_endpoint_name==ddm_endpoint_name).one()
ddm_endpoint.space_total = rse_usage['total']
ddm_endpoint.space_free = rse_usage['free']
ddm_endpoint.space_used = rse_usage['used']
ddm_endpoint.space_expired = rse_usage['expired']
ddm_endpoint.space_timestamp = rse_usage['space_timestamp']
session.commit()
_logger.debug("Done with update_storage")
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('update_storage excepted --> {0}'.format(sys.exc_info()))
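# Hedged usage sketch, not part of the original module: shows the rse_usage
# layout that update_storage expects, based on the keys it reads. The endpoint
# name and the numbers are hypothetical.
def _example_update_storage(session):
    rse_usage = {'total': 1000,  # capacity figures as reported by the storage source
                 'free': 400,
                 'used': 600,
                 'expired': 50,
                 'space_timestamp': None}  # or a datetime supplied by the source
    update_storage(session, 'EXAMPLE_DATADISK', rse_usage)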
def delete_sites(session, sites_to_delete):
"""
Delete sites and all dependent entries (panda_sites, ddm_endpoints, panda_ddm_relations).
Deletion of dependent entries is done through cascade definition in models
"""
if not sites_to_delete:
_logger.debug("delete_sites: nothing to delete")
return
site_objects = session.query(Site).filter(Site.site_name.in_(sites_to_delete)).all()
for site_object in site_objects:
site_name = site_object.site_name
try:
_logger.debug('Going to delete site --> {0}'.format(site_name))
session.delete(site_object)
session.commit()
_logger.debug('Deleted site --> {0}'.format(site_name))
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('delete_sites excepted for site {0} with {1}'.format(site_name, sys.exc_info()))
return
def delete_panda_sites(session, panda_sites_to_delete):
"""
Delete PanDA sites and dependent entries in panda_ddm_relations
"""
if not panda_sites_to_delete:
_logger.debug("delete_panda_sites: nothing to delete")
return
panda_site_objects = session.query(PandaSite).filter(PandaSite.panda_site_name.in_(panda_sites_to_delete)).all()
for panda_site_object in panda_site_objects:
panda_site_name = panda_site_object.panda_site_name
try:
_logger.debug('Going to delete panda_site --> {0}'.format(panda_site_name))
session.delete(panda_site_object)
session.commit()
_logger.debug('Deleted panda_site --> {0}'.format(panda_site_name))
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('delete_panda_sites excepted for panda_site {0} with {1}'.format(panda_site_name, sys.exc_info()))
def delete_ddm_endpoints(session, ddm_endpoints_to_delete):
"""
    Delete DDM endpoints and their dependent entries in panda_ddm_relations
"""
if not ddm_endpoints_to_delete:
_logger.debug("delete_ddm_endpoints: nothing to delete")
return
ddm_endpoint_objects = session.query(DdmEndpoint).filter(DdmEndpoint.ddm_endpoint_name.in_(ddm_endpoints_to_delete)).all()
for ddm_endpoint_object in ddm_endpoint_objects:
ddm_endpoint_name = ddm_endpoint_object.ddm_endpoint_name
try:
_logger.debug('Going to delete ddm_endpoint --> {0}'.format(ddm_endpoint_name))
session.delete(ddm_endpoint_object)
session.commit()
_logger.debug('Deleted ddm_endpoint --> {0}'.format(ddm_endpoint_name))
except exc.SQLAlchemyError:
session.rollback()
_logger.critical('delete_ddm_endpoints excepted for ddm_endpoint {0} with {1}'.format(ddm_endpoint_name, sys.exc_info()))
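# Hedged sketch, not part of the original module: how the read_* and delete_*
# helpers above could be combined to prune sites that are no longer present in
# AGIS. 'agis_site_names' is a hypothetical iterable produced by the collector.
def _example_prune_sites(session, agis_site_names):
    configurator_sites = read_configurator_sites(session)
    sites_to_delete = configurator_sites - set(agis_site_names)
    delete_sites(session, sites_to_delete)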
| apache-2.0 |
wazeerzulfikar/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 37 | 11979 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
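# Each tuple above is a (solver, shrinkage) combination exercised by the tests:
# shrinkage=None means no shrinkage, 'auto' uses the Ledoit-Wolf estimate and a
# float is a fixed shrinkage intensity (the 'svd' solver supports no shrinkage).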
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (run to check
    # that it does not fail)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test that the sum of the normalized eigenvalues equals 1.
    # Also test that the explained_variance_ratio_ computed by the
    # eigen solver matches the explained_variance_ratio_ computed
    # by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
    # arrange four classes with their means in a kite-shaped pattern;
    # the longer distance should be mapped to the first component and
    # the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
    # Ensure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/resample/test_datetime_index.py | 1 | 51241 | from datetime import datetime, timedelta
from functools import partial
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
import pytz
from pandas.compat import StringIO, range
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Panel, Series, Timedelta, Timestamp, isna, notna
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, period_range
from pandas.core.resample import (
DatetimeIndex, TimeGrouper, _get_timestamp_range_edges)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import BDay, Minute
@pytest.fixture()
def _index_factory():
return date_range
@pytest.fixture
def _index_freq():
return 'Min'
@pytest.fixture
def _static_values(index):
return np.random.rand(len(index))
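# The remaining fixtures used below (e.g. index, series, downsample_method,
# simple_date_range_series) are presumably provided by the shared conftest of
# the resample test package.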
def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
    # GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10),
index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
@pytest.mark.parametrize(
'_index_start,_index_end,_index_name',
[('1/1/2000 00:00:00', '1/1/2000 00:13:00', 'index')])
@pytest.mark.parametrize('closed, expected', [
('right',
lambda s: Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range(
'1/1/2000', periods=4, freq='5min', name='index'))),
('left',
lambda s: Series(
[s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range(
'1/1/2000 00:05', periods=3, freq='5min', name='index'))
)
])
def test_resample_basic(series, closed, expected):
s = series
expected = expected(s)
result = s.resample('5min', closed=closed, label='right').mean()
assert_series_equal(result, expected)
def test_resample_basic_grouper(series):
s = series
result = s.resample('5Min').last()
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expected = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expected)
@pytest.mark.parametrize(
'_index_start,_index_end,_index_name',
[('1/1/2000 00:00:00', '1/1/2000 00:13:00', 'index')])
@pytest.mark.parametrize('keyword,value', [
('label', 'righttt'),
('closed', 'righttt'),
('convention', 'starttt')
])
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
msg = "Unsupported value {value} for `{keyword}`".format(
value=value, keyword=keyword)
with pytest.raises(ValueError, match=msg):
series.resample('5min', **({keyword: value}))
@pytest.mark.parametrize(
'_index_start,_index_end,_index_name',
[('1/1/2000 00:00:00', '1/1/2000 00:13:00', 'index')])
def test_resample_how(series, downsample_method):
if downsample_method == 'ohlc':
pytest.skip('covered by test_resample_how_ohlc')
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
expected = s.groupby(grouplist).agg(downsample_method)
expected.index = date_range(
'1/1/2000', periods=4, freq='5min', name='index')
result = getattr(s.resample(
'5min', closed='right', label='right'), downsample_method)()
assert_series_equal(result, expected)
@pytest.mark.parametrize(
'_index_start,_index_end,_index_name',
[('1/1/2000 00:00:00', '1/1/2000 00:13:00', 'index')])
def test_resample_how_ohlc(series):
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = DataFrame(
s.groupby(grouplist).agg(_ohlc).values.tolist(),
index=date_range('1/1/2000', periods=4, freq='5min', name='index'),
columns=['open', 'high', 'low', 'close'])
result = s.resample('5min', closed='right', label='right').ohlc()
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'func', ['min', 'max', 'sum', 'prod', 'mean', 'var', 'std'])
def test_numpy_compat(func):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range(
'20130101', periods=5, freq='s'))
r = s.resample('2s')
msg = "numpy operations are not valid with resample"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(func, 1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(axis=1)
def test_resample_how_callables():
# GH#7929
data = np.arange(5, dtype=np.int64)
ind = date_range(start='2014-01-01', periods=len(data), freq='d')
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class FnClass(object):
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(FnClass())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_rounding():
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
'date', 'time']}, index_col='timestamp')
df.index.name = None
result = df.resample('6s').sum()
expected = DataFrame({'value': [
4, 9, 4, 2
]}, index=date_range('2014-11-08', freq='6s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('7s').sum()
expected = DataFrame({'value': [
4, 10, 4, 1
]}, index=date_range('2014-11-08', freq='7s', periods=4))
assert_frame_equal(result, expected)
result = df.resample('11s').sum()
expected = DataFrame({'value': [
11, 8
]}, index=date_range('2014-11-08', freq='11s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('13s').sum()
expected = DataFrame({'value': [
13, 6
]}, index=date_range('2014-11-08', freq='13s', periods=2))
assert_frame_equal(result, expected)
result = df.resample('17s').sum()
expected = DataFrame({'value': [
16, 3
]}, index=date_range('2014-11-08', freq='17s', periods=2))
assert_frame_equal(result, expected)
def test_resample_basic_from_daily():
# from daily
dti = date_range(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun').last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/9/2005']
assert result.iloc[2] == s.iloc[-1]
result = s.resample('W-MON').last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s['1/3/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-TUE').last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s['1/4/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-WED').last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s['1/5/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-THU').last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s['1/6/2005']
assert result.iloc[1] == s['1/10/2005']
result = s.resample('W-FRI').last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s['1/7/2005']
assert result.iloc[1] == s['1/10/2005']
# to biz day
result = s.resample('B').last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s['1/2/2005']
assert result.iloc[1] == s['1/3/2005']
assert result.iloc[5] == s['1/9/2005']
assert result.index.name == 'index'
def test_resample_upsampling_picked_but_not_correct():
# Test for issue #3020
dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D').mean()
assert result.index[0] == dates[0]
# GH 5955
    # incorrectly deciding to upsample when the axis frequency matches the
    # resample frequency
s = Series(np.arange(1., 6), index=[datetime(
1975, 1, i, 12, 0) for i in range(1, 6)])
expected = Series(np.arange(1., 6), index=date_range(
'19750101', periods=5, freq='D'))
result = s.resample('D').count()
assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample('D').sum()
result2 = s.resample('D').mean()
assert_series_equal(result1, expected)
assert_series_equal(result2, expected)
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A').mean()
assert_series_equal(result['A'], df['A'].resample('A').mean())
result = df.resample('M').mean()
assert_series_equal(result['A'], df['A'].resample('M').mean())
df.resample('M', kind='period').mean()
df.resample('W-WED', kind='period').mean()
@pytest.mark.parametrize('loffset', [timedelta(minutes=1),
'1min', Minute(1),
np.timedelta64(1, 'm')])
def test_resample_loffset(loffset):
# GH 7687
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right',
loffset=loffset).mean()
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
assert result.index.freq == Minute(5)
# from daily
dti = date_range(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun').last()
business_day_offset = BDay()
expected = ser.resample('w-sun', loffset=-business_day_offset).last()
assert result.index[0] - business_day_offset == expected.index[0]
def test_resample_loffset_upsample():
# GH 20744
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right',
loffset=timedelta(minutes=1)).ffill()
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[5], s[10], s[-1]],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
def test_resample_loffset_count():
# GH 12725
start_time = '1/1/2000 00:00:00'
rng = date_range(start_time, periods=100, freq='S')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('10S', loffset='1s').count()
expected_index = (
date_range(start_time, periods=10, freq='10S') +
timedelta(seconds=1)
)
expected = Series(10, index=expected_index)
assert_series_equal(result, expected)
# Same issue should apply to .size() since it goes through
# same code path
result = ts.resample('10S', loffset='1s').size()
assert_series_equal(result, expected)
def test_resample_upsample():
# from daily
dti = date_range(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min').pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == 'index'
def test_resample_how_method():
# GH9915
s = Series([11, 22],
index=[Timestamp('2015-03-31 21:48:52.672000'),
Timestamp('2015-03-31 21:49:52.739000')])
expected = Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=[Timestamp('2015-03-31 21:48:50'),
Timestamp('2015-03-31 21:49:00'),
Timestamp('2015-03-31 21:49:10'),
Timestamp('2015-03-31 21:49:20'),
Timestamp('2015-03-31 21:49:30'),
Timestamp('2015-03-31 21:49:40'),
Timestamp('2015-03-31 21:49:50')])
assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point():
# GH#9756
index = date_range(start='20150101', end='20150331', freq='BM')
expected = DataFrame({'A': Series([21, 41, 63], index=index)})
index = date_range(start='20150101', end='20150331', freq='B')
df = DataFrame(
{'A': Series(range(len(index)), index=index)}, dtype='int64')
result = df.resample('BM').last()
assert_frame_equal(result, expected)
def test_upsample_with_limit():
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').ffill(limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_nearest_upsample_with_limit():
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').nearest(limit=2)
expected = ts.reindex(result.index, method='nearest', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(series):
s = series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min').ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs['open'] == s[-6]
assert xs['high'] == s[-6:-1].max()
assert xs['low'] == s[-6:-1].min()
assert xs['close'] == s[-2]
xs = result.iloc[0]
assert xs['open'] == s[0]
assert xs['high'] == s[:5].max()
assert xs['low'] == s[:5].min()
assert xs['close'] == s[4]
def test_resample_ohlc_result():
# GH 12332
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
s = Series(range(len(index)), index=index)
a = s.loc[:'4-15-2000'].resample('30T').ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:'4-14-2000'].resample('30T').ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range('2013-12-30', '2014-01-07')
index = rng.drop([Timestamp('2014-01-01'),
Timestamp('2013-12-31'),
Timestamp('2014-01-04'),
Timestamp('2014-01-05')])
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample('B').mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe():
df = (
DataFrame({
'PRICE': {
Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {
Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H').ohlc()
exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
df['PRICE'].resample('H').ohlc()],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H').ohlc()
exp.columns = pd.MultiIndex.from_tuples([
('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
    # duplicate columns currently fail
    # df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index():
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq='M')
for i in range(12)])
df.iloc[3, :] = np.nan
result = df.resample('Q', axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [
Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
assert_frame_equal(result, expected)
def test_resample_reresample():
dti = date_range(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right').mean()
result = bs.resample('8H').mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(simple_date_range_series):
ts = simple_date_range_series('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period').mean()
expected = ts.resample('A-DEC').mean()
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period').mean()
expected = ts.resample('A-JUN').mean()
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min():
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', closed='right',
label='right').ohlc()
assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc['1/1/2000 00:05'] == exp).all()
exp = _ohlc(ts['1/1/2000 5:55:01':])
assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
def test_downsample_non_unique():
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M').mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique():
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
msg = 'cannot reindex from a duplicate axis'
with pytest.raises(ValueError, match=msg):
ts.asfreq('B')
def test_resample_axis1():
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1).mean()
expected = df.T.resample('M').mean().T
tm.assert_frame_equal(result, expected)
def test_resample_panel():
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).mean()
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M').mean())
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2).mean()
expected = p_apply(panel2,
lambda x: x.resample('M', axis=1).mean())
tm.assert_panel_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_resample_panel_numpy():
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
with catch_warnings(record=True):
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
expected = panel.resample('M', axis=1).mean()
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
expected = panel.resample('M', axis=2).mean()
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks():
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left').mean()
expected = ts.resample(freq, closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_single_group():
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M').sum(),
ts.resample('M').apply(mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D').apply(lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base():
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2).mean()
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_daily_anchored():
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left').mean()
expected = ts.resample('D', closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet():
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period').mean()
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg():
# aggregate a period resampler with a lambda
s2 = Series(np.random.randint(0, 5, 50),
index=pd.period_range('2012-01-01', freq='H', periods=50),
dtype='float64')
expected = s2.to_timestamp().resample('D').mean().to_period()
result = s2.resample('D').agg(lambda x: x.mean())
assert_series_equal(result, expected)
def test_resample_segfault():
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]
df = DataFrame.from_records(all_wins_and_wagers,
columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
assert_frame_equal(result, expected)
def test_resample_dtype_preservation():
# GH 12202
# validation tests for dtype preservation
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4, freq='W'),
'group': [1, 1, 2, 2],
'val': Series([5, 6, 7, 8],
dtype='int32')}
).set_index('date')
result = df.resample('1D').ffill()
assert result.val.dtype == np.int32
result = df.groupby('group').resample('1D').ffill()
assert result.val.dtype == np.int32
def test_resample_dtype_coerceion():
pytest.importorskip('scipy.interpolate')
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = (df.astype("float64")
.resample("H")
.mean()
["a"]
.interpolate("cubic")
)
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet():
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W').mean()
expected = ts.resample('W-SUN').mean()
assert_series_equal(resampled, expected)
def test_monthly_resample_error():
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('M')
def test_nanosecond_resample_error():
    # GH 12307 - Values fall after the last bin when resampling
    # using pd.tseries.offsets.Nano as the period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(
start=pd.to_datetime(start),
periods=10,
freq='100n'
)
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg('mean')
exp_indx = pd.date_range(
start=pd.to_datetime(exp_start),
periods=10,
freq='100n'
)
exp = Series(range(len(exp_indx)), index=exp_indx)
assert_series_equal(result, exp)
def test_resample_anchored_intraday(simple_date_range_series):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M').mean()
expected = df.resample(
'M', kind='period').mean().to_timestamp(how='end')
expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left').mean()
exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
exp = exp.to_timestamp(how='end')
exp.index = exp.index + Timedelta(1, 'ns') - Timedelta(1, 'D')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q').mean()
expected = df.resample(
'Q', kind='period').mean().to_timestamp(how='end')
expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left').mean()
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left').mean()
expected = expected.to_timestamp(how='end')
expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D')
tm.assert_frame_equal(result, expected)
ts = simple_date_range_series('2012-04-29 23:00', '2012-04-30 5:00',
freq='h')
resampled = ts.resample('M').mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(simple_date_range_series):
ts = simple_date_range_series('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday():
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
) | pd.date_range(
'2014-10-15 23:00:00', periods=2, freq='2200L')
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample('2200L').mean()
assert result.index[-1] == Timestamp('2014-10-15 23:00:02.000')
# Ensure right closing works
result = s.resample('2200L', label='right').mean()
assert result.index[-1] == Timestamp('2014-10-15 23:00:04.200')
def test_corner_cases(simple_period_range_series,
simple_date_range_series):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left').mean()
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
tm.assert_index_equal(result.index, ex_index)
len0pts = simple_period_range_series(
'2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC').mean()
assert len(result) == 0
# resample to periods
ts = simple_date_range_series(
'2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period').mean()
assert len(result) == 1
assert result.index[0] == Period('2000-04', freq='M')
def test_anchored_lowercase_buglet():
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d').mean()
def test_upsample_apply_functions():
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min').aggregate(['mean', 'sum'])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic():
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D').sum()
exp = ts.sort_index().resample('D').sum()
assert_series_equal(result, exp)
def test_resample_median_bug_1688():
for dtype in ['int64', 'int32', 'float64', 'float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(simple_date_range_series):
ts = simple_date_range_series('1/1/2000', '4/1/2000')
result = ts.resample('M').apply(lambda x: x.mean())
exp = ts.resample('M').mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample('M').mean()
foo_exp.name = 'foo'
bar_exp = ts.resample('M').std()
bar_exp.name = 'bar'
result = ts.resample('M').apply(
[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ['foo', 'bar']
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times():
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS').sum()
def test_resample_consistency():
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
s = Series(np.arange(4.), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq='10T')
s10 = s.reindex(index=i10, method='bfill')
s10_2 = s.reindex(index=i10, method='bfill', limit=2)
rl = s.reindex_like(s10, method='bfill', limit=2)
r10_2 = s.resample('10Min').bfill(limit=2)
r10 = s.resample('10Min').bfill()
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper():
# GH 7227
dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
datetime(2014, 11, 5), datetime(2014, 9, 5),
datetime(2014, 10, 8), datetime(2014, 7, 15)]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index('A').resample('M').count()
exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
'2014-09-30',
'2014-10-31', '2014-11-30'],
freq='M', name='A')
expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
len(dates))))
result = df.set_index('A').resample('M').count()
expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
index=exp_idx, columns=['B', 'C'])
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
def test_resample_nunique():
# GH 12352
df = DataFrame({
'ID': {Timestamp('2015-06-05 00:00:00'): '0010100903',
Timestamp('2015-06-08 00:00:00'): '0010150847'},
'DATE': {Timestamp('2015-06-05 00:00:00'): '2015-06-05',
Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
r = df.resample('D')
g = df.groupby(pd.Grouper(freq='D'))
expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x:
x.nunique())
assert expected.name == 'ID'
for t in [r, g]:
        result = t.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample('D').nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
assert_series_equal(result, expected)
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype='int64')
r = s.resample('M')
# Since all elements are unique, these should all be the same
results = [
r.count(),
r.nunique(),
r.agg(Series.nunique),
r.agg('nunique')
]
assert_series_equal(results[0], results[1])
assert_series_equal(results[0], results[2])
assert_series_equal(results[0], results[3])
@pytest.mark.parametrize('n', [10000, 100000])
@pytest.mark.parametrize('k', [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
ts = Series(np.random.randint(0, n // k, n).astype('int64'),
index=np.random.choice(dr, n))
left = ts.resample('30T').nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(),
freq='30T')
vals = ts.values
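    # Recompute the expected per-30-minute-bin unique counts by hand: assign
    # each timestamp to a bin, sort by (bin, value), keep only the first
    # occurrence of every (bin, value) pair, then count those per bin.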
bins = np.searchsorted(ix.values, ts.index, side='right')
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1,
minlength=len(ix)).astype('int64', copy=False)
right = Series(arr, index=ix)
assert_series_equal(left, right)
def test_resample_size():
n = 10000
dr = date_range('2015-09-19', periods=n, freq='T')
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample('7T').size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')
bins = np.searchsorted(ix.values, ts.index.values, side='right')
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
copy=False)
right = Series(val, index=ix)
assert_series_equal(left, right)
def test_resample_across_dst():
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
.dt.tz_localize('UTC')
.dt.tz_convert('Europe/Madrid'))
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule='H').sum()
expected = DataFrame([5, 5], index=dti2)
assert_frame_equal(result, expected)
def test_groupby_with_dst_time_change():
# GH 24972
index = pd.DatetimeIndex([1478064900001000000, 1480037118776792000],
tz='UTC').tz_convert('America/Chicago')
df = pd.DataFrame([1, 2], index=index)
result = df.groupby(pd.Grouper(freq='1d')).last()
expected_index_values = pd.date_range('2016-11-02', '2016-11-24',
freq='d', tz='America/Chicago')
index = pd.DatetimeIndex(expected_index_values)
expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
assert_frame_equal(result, expected)
def test_resample_dst_anchor():
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
df = DataFrame([5], index=dti)
assert_frame_equal(df.resample(rule='D').sum(),
DataFrame([5], index=df.index.normalize()))
df.resample(rule='MS').sum()
assert_frame_equal(
df.resample(rule='MS').sum(),
DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
tz='US/Eastern')))
dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
tz='Europe/Paris')
values = range(dti.size)
df = DataFrame({"a": values,
"b": values,
"c": values}, index=dti, dtype='int64')
how = {"a": "min", "b": "max", "c": "count"}
assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193]},
index=date_range('9/30/2013', '11/4/2013',
freq='W-MON', tz='Europe/Paris')),
'W-MON Frequency')
assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193]},
index=date_range('9/30/2013', '11/11/2013',
freq='2W-MON', tz='Europe/Paris')),
'2W-MON Frequency')
assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 48, 1538],
"b": [47, 1537, 1586],
"c": [48, 1490, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='MS', tz='Europe/Paris')),
'MS Frequency')
assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame({"a": [0, 1538],
"b": [1537, 1586],
"c": [1538, 49]},
index=date_range('9/1/2013', '11/1/2013',
freq='2MS', tz='Europe/Paris')),
'2MS Frequency')
df_daily = df['10/26/2013':'10/29/2013']
assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
[["a", "b", "c"]],
DataFrame({"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48]},
index=date_range('10/26/2013', '10/29/2013',
freq='D', tz='Europe/Paris')),
'D Frequency')
def test_downsample_across_dst():
# GH 8531
tz = pytz.timezone('Europe/Berlin')
dt = datetime(2014, 10, 26)
dates = date_range(tz.localize(dt), periods=4, freq='2H')
result = Series(5, index=dates).resample('H').mean()
expected = Series([5., np.nan] * 3 + [5.],
index=date_range(tz.localize(dt), periods=7,
freq='H'))
tm.assert_series_equal(result, expected)
def test_downsample_across_dst_weekly():
# GH 9119, GH 21459
df = DataFrame(index=DatetimeIndex([
'2017-03-25', '2017-03-26', '2017-03-27',
'2017-03-28', '2017-03-29'
], tz='Europe/Amsterdam'),
data=[11, 12, 13, 14, 15])
result = df.resample('1W').sum()
expected = DataFrame([23, 42], index=pd.DatetimeIndex([
'2017-03-26', '2017-04-02'
], tz='Europe/Amsterdam'))
tm.assert_frame_equal(result, expected)
idx = pd.date_range("2013-04-01", "2013-05-01", tz='Europe/London',
freq='H')
s = Series(index=idx)
result = s.resample('W').mean()
expected = Series(index=pd.date_range(
'2013-04-07', freq='W', periods=5, tz='Europe/London'
))
tm.assert_series_equal(result, expected)
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex([pd.NaT,
'1970-01-01 00:00:00',
pd.NaT,
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:01',
'1970-01-01 00:00:02'])
frame_1s = DataFrame([3, 7, 11], index=index_1s)
assert_frame_equal(frame.resample('1s').mean(), frame_1s)
index_2s = DatetimeIndex(['1970-01-01 00:00:00',
'1970-01-01 00:00:02'])
frame_2s = DataFrame([5, 11], index=index_2s)
assert_frame_equal(frame.resample('2s').mean(), frame_2s)
index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
frame_3s = DataFrame([7], index=index_3s)
assert_frame_equal(frame.resample('3s').mean(), frame_3s)
assert_frame_equal(frame.resample('60s').mean(), frame_3s)
def test_resample_datetime_values():
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({'timestamp': dates}, index=dates)
exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range('2016-01-15', periods=3, freq='2D'),
name='timestamp')
res = df.resample('2D').first()['timestamp']
tm.assert_series_equal(res, exp)
res = df['timestamp'].resample('2D').first()
tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(series):
# GH 14615
def f(data, add_arg):
return np.mean(data) * add_arg
multiplier = 10
result = series.resample('D').apply(f, multiplier)
expected = series.resample('D').mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing as kwarg
result = series.resample('D').apply(f, add_arg=multiplier)
expected = series.resample('D').mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing dataframe
df = pd.DataFrame({"A": 1, "B": 2},
index=pd.date_range('2017', periods=10))
result = df.groupby("A").resample("D").agg(f, multiplier)
expected = df.groupby("A").resample('D').mean().multiply(multiplier)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('k', [1, 2, 3])
@pytest.mark.parametrize('n1, freq1, n2, freq2', [
(30, 'S', 0.5, 'Min'),
(60, 'S', 1, 'Min'),
(3600, 'S', 1, 'H'),
(60, 'Min', 1, 'H'),
(21600, 'S', 0.25, 'D'),
(86400, 'S', 1, 'D'),
(43200, 'S', 0.5, 'D'),
(1440, 'Min', 1, 'D'),
(12, 'H', 0.5, 'D'),
(24, 'H', 1, 'D'),
])
def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
s = pd.Series(0, index=pd.date_range('19910905 13:00',
'19911005 07:00',
freq=freq1))
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
result2 = s.resample(str(n2_) + freq2).mean()
assert_series_equal(result1, result2)
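# Illustrative sketch (added for clarity, not part of the original test suite;
# the helper name is hypothetical and is not collected by pytest): '60S' and
# '1Min' describe the same bin width, so resampling with either alias should
# give identical results, which is what the parametrized test above checks for
# a range of offset pairs.
def _demo_equivalent_offsets_example():
    s = pd.Series(range(10),
                  index=pd.date_range('2019-01-01', periods=10, freq='30S'))
    assert_series_equal(s.resample('60S').mean(), s.resample('1Min').mean())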
@pytest.mark.parametrize('first,last,offset,exp_first,exp_last', [
('19910905', '19920406', 'D', '19910905', '19920407'),
('19910905 00:00', '19920406 06:00', 'D', '19910905', '19920407'),
('19910905 06:00', '19920406 06:00', 'H', '19910905 06:00',
'19920406 07:00'),
('19910906', '19920406', 'M', '19910831', '19920430'),
('19910831', '19920430', 'M', '19910831', '19920531'),
('1991-08', '1992-04', 'M', '19910831', '19920531'),
])
def test_get_timestamp_range_edges(first, last, offset,
exp_first, exp_last):
first = pd.Period(first)
first = first.to_timestamp(first.freq)
last = pd.Period(last)
last = last.to_timestamp(last.freq)
exp_first = pd.Timestamp(exp_first, freq=offset)
exp_last = pd.Timestamp(exp_last, freq=offset)
offset = pd.tseries.frequencies.to_offset(offset)
result = _get_timestamp_range_edges(first, last, offset)
expected = (exp_first, exp_last)
assert result == expected
| bsd-3-clause |
pianomania/scikit-learn | sklearn/cluster/birch.py | 23 | 23648 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
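# Illustrative sketch (added for clarity, not part of scikit-learn; the helper
# name is hypothetical and it is never called at import time): it shows the
# kind of output _iterate_sparse_X produces, namely one dense 1-D row per CSR
# row, which is what the fitting loop below consumes.
def _demo_iterate_sparse_X():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.array([[0., 2., 0.], [1., 0., 3.]]))
    # Yields array([0., 2., 0.]) followed by array([1., 0., 3.])
    return list(_iterate_sparse_X(X))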
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, in order to retrieve
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
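# Note: since new_centroid = new_ls / new_n, the expression above simplifies
# to new_ss / new_n - ||new_centroid||**2, i.e. the mean squared distance of
# all member samples to the merged centroid; the merge below is accepted only
# when this value stays within threshold**2.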
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
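# Illustrative sketch (added for clarity, not part of scikit-learn; the helper
# name is hypothetical and it is never called at import time): Birch also
# supports the online-learning route described in the class docstring, by
# feeding chunks of data to partial_fit instead of a single call to fit.
def _demo_birch_partial_fit():
    rng = np.random.RandomState(0)
    brc = Birch(n_clusters=None, threshold=0.5)
    for chunk in np.array_split(rng.rand(100, 2), 5):
        brc.partial_fit(chunk)
    return brc.subcluster_centers_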
| bsd-3-clause |
erdc-cm/air-water-vv | 2d/hydraulicStructures/sluice_gate/postprocess/dischargePlot_sluice.py | 1 | 2656 | from numpy import *
from scipy import *
from pylab import *
import collections as cll
import csv
import os
import matplotlib.pyplot as plt
#####################################################################################
## Reading probes into the file
folder = "../output"
os.chdir(folder)
filename='combined_column_gauge.csv'
def readProbeFile(filename):
with open (filename, 'rb') as csvfile:
data=np.loadtxt(csvfile, delimiter=",",skiprows=1)
time=data[:,0]
data = data[:,1:]
csvfile.seek(0)
header = csvfile.readline()
header = header.replace("time","")
header = header.replace("[","")
header = header.replace("]","")
header = header.replace(","," ")
header = header.split()
probeType = []
probex = []
probey = []
probez = []
for ii in range(0,len(header),4):
probeType.append(header[ii])
probex.append(float(header[ii+1]))
probey.append(float(header[ii+2]))
probez.append(float(header[ii+3]))
probeCoord = zip(np.array(probex),np.array(probey),np.array(probez))
datalist = [probeType,probeCoord,time,data]
return datalist
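# Note on the expected gauge file layout (inferred from the parsing above, not
# from separate documentation): the first column is "time" and each remaining
# column is labelled like "<fieldName> [x, y, z]"; stripping "time", the
# brackets and the commas therefore leaves groups of four tokens
# (type, x, y, z) per probe, which are stored in probeType and probeCoord.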
#####################################################################################
# Extracts the datas from the function readProbeFile
datalist = readProbeFile(filename)
time = datalist[2]
# Calculates the time-average discharge over the crest
U = []
for i in range(0,len(datalist[3])):
U.append(np.mean(datalist[3][i]))
U = np.array(U)
Q = U*0.25
#####################################################################################
# Plotting the probes
plt.plot(time, Q)
plt.xlabel('time [sec]')
plt.ylabel('Q [m^2/s]')
plt.suptitle('Time-averaged discharge under the gate of the sluice gate')
plt.ylim((0.6,1.4))
plt.xlim((-1.0,30.0))
plt.grid(True)
plt.savefig('CrumpWeir_discharge.png')
plt.show()
#####################################################################################
# Validation of the result
Q_th = 1.037 #Theoretical discharge between 20 s and 30 s
T = time.tolist()
T_20 = T.index(20.0)
T_30 = T.index(30.0)
T_20_to_30 = []
for i in range(T_20,T_30):
T_20_to_30.append(Q[i])
Q_pr = np.mean(T_20_to_30) #Discharge between 20 s and 30 s obtained with PROTEUS
err = 100*abs(Q_th-Q_pr)/Q_th
val = open('validation_discharge_sluice.txt', 'w')
val.write('Gauges taken under the gate.'+'\n')
val.write('Average discharge between 20 s and 30 s.'+'\n')
val.write('Theory'+'\t'+'Simulation'+'\t'+'Error (%)'+'\n')
val.write(str(Q_th)+'\t'+str(Q_pr)+'\t'+str(err))
val.close()
| mit |
Tarskin/LaCyTools | LaCyTools.py | 1 | 178953 | #! /usr/bin/env python
#
# Copyright 2014-2016 Bas C. Jansen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the Apache 2.0 license along
# with this program; if not, see
# http://www.apache.org/licenses/LICENSE-2.0
from datetime import datetime
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk
)
from scipy.interpolate import InterpolatedUnivariateSpline
import scipy.optimize
from scipy.optimize import curve_fit
#from Tkinter import *
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import base64
import collections
import glob
import itertools
import linecache
import math
import matplotlib.pyplot as plt
import matplotlib
import numpy
import os
import struct
import sys
import zlib
import tables
# Dev Imports
#import timeit
#import inspect
tables.parameters.MAX_NUMEXPR_THREADS = None
tables.parameters.MAX_BLOSC_THREADS = None
# File Parameters
EXTENSION = ".mzXML" # File types that will be used by MassyTools
EXTRACTION = "aligned" # Pre-fix required for files to be extracted
OUTPUT = "Summary.txt" # Name of the output file
SETTINGS_FILE = "Settings.txt" # Name of the settings file
OVERWRITE_ANALYTES = True # This option specifies if LaCyTools should overwrite an existing analytes.ref file or not
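# Note: the settings file written by the Settings window consists of one
# "<PARAMETER><tab><value>" pair per line, e.g. "ALIGNMENT_TIME_WINDOW\t10.0";
# see getSettings() and the save() callback in settingsPopup() below.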
# Alignment Parameters
ALIGNMENT_TIME_WINDOW = 10 # The +/- time window that the program is allowed to look for the feature for alignment (EIC time axis)
ALIGNMENT_MASS_WINDOW = 0.1 # The +/- m/z window (not charge state corrected) that is used to detect the feature used for alignment. Afterwards a spline fit is used to detect the measured time
ALIGNMENT_BACKGROUND_MULTIPLIER = 2 # The multiplier of the timewindow used for background determination
ALIGNMENT_S_N_CUTOFF = 9 # The minimum S/N value of a feature to be used for alignment
ALIGNMENT_MIN_PEAK = 5 # The minimum number of features used for alignment
# Calibration Parameters
SUM_SPECTRUM_RESOLUTION = 100 # Number of data points per 1 whole m/z unit
CALIB_MASS_WINDOW = 0.5 # This +/- mass window (in Dalton) is used to detect the accurate mass of a calibrant
CALIB_S_N_CUTOFF = 9 # The minimum S/N value of a feature to be used for calibration
CALIB_MIN_PEAK = 3 # Minimum number of calibrants
# PARAMETERS
MASS_MODIFIERS = [] # The mass modifiers refer to changes to the analyte
CHARGE_CARRIER = ['proton'] # The charge carrier that is used for ionization
# Extraction Parameters
EXTRACTION_TYPE = 2 # 1 = Max, 0 = Total and 2 = Area
MASS_WINDOW = 0.2 # The +/- m/z window used around each feature for extraction
TIME_WINDOW = 8 # The +/- time window that will be used around a cluster, to create the sum spectrum
EXTRACTION_PADDING = 2 # total number of additional windows to be examined and quantified (for IPQ)
MIN_CHARGE = 2 # The minimum charge state that the program will integrate for all features (unless overwritten in the composition file)
MAX_CHARGE = 3 # The maximum charge state that the program will integrate for all features (unless overwritten in the composition file)
#MIN_CONTRIBUTION = 0.01 # Minimum contribution to isotopic distribution to be included (NOT BEING USED ATM)
MIN_TOTAL = 0.95 # Desired contribution of extracted isotopes of total isotopic pattern
BACKGROUND_WINDOW = 10 # Total m/z window (+ and -) to search for background
S_N_CUTOFF = 9 # Minimum signal to noise value of an analyte to be included in the percentage QC
# The maximum distance between distinct isotopic masses to be 'pooled'
EPSILON = 0.5 # DO NOT TOUCH THIS UNLESS YOU KNOW WTF YOU ARE DOING! Read below if you truly want to know the meaning:
# This value represents the maximum distance (in Da) for which the element specific isotopic mass defect will be combined
# Isotopic Mass Differences
C = [('13C',0.0107,1.00335)]
H = [('2H',0.00012,1.00628)]
N = [('15N',0.00364,0.99703)]
O18 = [('18O',0.00205,2.00425)]
O17 = [('17O',0.00038,1.00422)]
S33 = [('33S',0.0076,0.99939)]
S34 = [('34S',0.0429,1.9958)]
S36 = [('36S',0.0002,3.99501)]
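# Note: each tuple above is (isotope label, natural abundance fraction, mass
# difference in Da relative to the monoisotopic form); e.g. one extra 13C
# occurs with ~1.07% abundance and adds ~1.00335 Da to the analyte mass.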
# Read the building blocks
# TODO: Move this inside the app
BLOCKS = {}
for file in glob.glob("./blocks/*.block"):
block = os.path.splitext(os.path.basename(file))[0]
keys = []
values = []
with open(file,'r') as fr:
for line in fr:
key, value = line.rstrip().split()
keys.append(key)
try:
value = int(value)
except ValueError:
value = float(value)
values.append(value)
BLOCKS[block] = dict(zip(keys,values))
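# Note: a .block file is plain text with one "<key> <value>" pair per line.
# Hypothetical example for a generic hexose residue (values are only
# illustrative; the shipped ./blocks/ files are authoritative):
#
#     mass 162.0528
#     carbons 6
#     hydrogens 10
#     nitrogens 0
#     oxygens 5
#     sulfurs 0
#     available_for_charge_carrier 0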
# Verify the blocks
for k,v in BLOCKS.items():
try:
if type(v['mass']) != float:
raise TypeError('Mass is not a float.')
if type(v['carbons']) != int:
raise TypeError('Carbons is not an integer.')
if type(v['hydrogens']) != int:
raise TypeError('Hydrogens is not an integer.')
if type(v['nitrogens']) != int:
raise TypeError('Nitrogens is not an integer.')
if type(v['oxygens']) != int:
raise TypeError('Oxygens is not an integer.')
if type(v['sulfurs']) != int:
raise TypeError('Sulfurs is not an integer.')
if type(v['available_for_charge_carrier']) != int:
raise TypeError('Charge carrier is not an integer.')
if v['available_for_charge_carrier'] not in [0,1]:
raise TypeError('Charge carrier is not 0 or 1.')
except:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Block Error","An error was observed in block "+str(k)+
". Please correct this block before running LaCyTools again.")
sys.exit()
UNITS = BLOCKS.keys()
###################
# DATA STRUCTURES #
###################
class Analyte():
def __init__(self):
self.composition = None
self.mass = None
self.massWindow = None
self.time = None
self.timeWindow = None
self.minCharge = None
self.maxCharge = None
self.isotopes = None
class Isotope():
def __init__(self):
self.isotope = None
self.charge = None
self.mass = None
self.obsInt = None
self.obsMax = None
self.expInt = None
self.qc = None
self.background = None
self.backgroundPoint = None
self.noise = None
################################################################################################
# Tooltip code - Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml #
################################################################################################
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1,
wraplength=500, font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
###############################
# Start of actual application #
###############################
class App():
def __init__(self,master):
# VARIABLES
self.master = master
self.version = "1.1.0-alpha"
self.build = "190207b"
self.inputFile = ""
self.inputFileIdx = 0
self.refFile = ""
self.alFile = ""
self.calFile = tk.IntVar()
self.ptFile = None
self.rmMZXML = tk.IntVar()
self.batchFolder = ""
self.batchProcessing = 0
self.batchWindow = 0
self.dataWindow = 0
self.outputWindow = 0
self.analyteIntensity = tk.IntVar()
self.analyteRelIntensity = tk.IntVar()
self.analyteBackground = tk.IntVar()
self.analyteNoise = tk.IntVar()
self.analytePerCharge = tk.IntVar()
self.analyteBckSub = tk.IntVar()
self.normalizeCluster = tk.IntVar()
self.alignmentQC = tk.IntVar()
self.qualityControl = tk.IntVar()
self.spectraQualityControl = tk.IntVar()
self.log = True
# Background can be determined in two ways
# Options are 'MIN', 'MEDIAN' and 'NOBAN'
self.background = "MIN"
# Nose can be determined in multiple ways
# Options are 'RMS' and 'MM'
self.noise = "RMS"
self.fig = matplotlib.figure.Figure(figsize=(12, 6))
# Attempt to retrieve previously saved settings from settingsfile
if os.path.isfile('./'+str(SETTINGS_FILE)):
self.getSettings()
# The LaCyTools Logo (Placeholder figure)
if os.path.isfile('./UI/LaCyTools.png'):
background_image = self.fig.add_subplot(111)
image = matplotlib.image.imread('./UI/LaCyTools.png')
background_image.axis('off')
self.fig.set_tight_layout(True)
background_image.imshow(image)
# The Canvas
self.canvas = FigureCanvasTkAgg(self.fig, master = master)
self.toolbar = NavigationToolbar2Tk(self.canvas, root)
self.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=tk.YES)
self.canvas.draw()
# FRAME
frame = tk.Frame(master)
master.title("LaCyTools")
# MENU
menu = tk.Menu(root)
root.config(menu = menu)
filemenu = tk.Menu(menu,tearoff=0)
menu.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Open Input File", command = self.openFile)
extractmenu = tk.Menu(menu,tearoff=0)
menu.add_cascade(label="Extraction", menu=extractmenu)
extractmenu.add_command(label="Open ref file", command = self.openRefFile)
extractmenu.add_command(label="Extract", command = self.extractData)
menu.add_command(label="Batch Process", command = lambda: self.batchPopup(self))
menu.add_command(label="Data Storage", command = lambda: self.dataPopup(self))
menu.add_command(label="Settings", command = lambda: self.settingsPopup(self))
def selectIsotopes(self, results):
""" TODO
"""
# Sort by increasing m/z (x[0])
results.sort(key=lambda x: x[0])
# Add index of the isotope
foo = []
for index,i in enumerate(results):
foo.append((i[0],i[1],index))
# Sort by decreasing fraction (x[1])
results = foo
results.sort(key=lambda x: x[1], reverse=True)
contribution = 0.0
foo = []
# Take only the highest fraction isotopes until the contribution
# exceeds the MIN_TOTAL
for i in results:
contribution += float(i[1])
foo.append(i)
if contribution > MIN_TOTAL:
break
results = foo
# Sort by increasing m/z (x[0])
results.sort(key=lambda x: x[0])
return results
def settingsPopup(self,master):
""" This function creates a window in which the user can change
all the parameters that are for normal use. Certain advanced
settings such as the extraction type and noise determination
method remain hidden from the user through this window.
"""
def close(self):
""" This function closes the settings popup and applies
all the entered values to the parameters.
"""
global ALIGNMENT_TIME_WINDOW
global ALIGNMENT_MASS_WINDOW
global ALIGNMENT_S_N_CUTOFF
global ALIGNMENT_MIN_PEAK
global CALIB_MASS_WINDOW
global CALIB_S_N_CUTOFF
global CALIB_MIN_PEAK
global SUM_SPECTRUM_RESOLUTION
global MASS_WINDOW
global TIME_WINDOW
global MIN_CHARGE
global MAX_CHARGE
global CHARGE_CARRIER
global MIN_TOTAL
global BACKGROUND_WINDOW
global S_N_CUTOFF
global EXTRACTION_PADDING
ALIGNMENT_TIME_WINDOW = float(self.alignTimeWindow.get())
ALIGNMENT_MASS_WINDOW = float(self.alignMassWindow.get())
ALIGNMENT_S_N_CUTOFF = int(self.alignSn.get())
ALIGNMENT_MIN_PEAK = int(self.alignMin.get())
CALIB_MASS_WINDOW = float(self.calibMassWindow.get())
CALIB_S_N_CUTOFF = int(self.calibSn.get())
CALIB_MIN_PEAK = int(self.calibMin.get())
SUM_SPECTRUM_RESOLUTION = int(self.sumSpec.get())
MASS_WINDOW = float(self.extracMassWindow.get())
TIME_WINDOW = float(self.extracTimeWindow.get())
MIN_CHARGE = int(self.extracMinCharge.get())
MAX_CHARGE = int(self.extracMaxCharge.get())
EXTRACTION_PADDING = int(self.extracPad.get())
CHARGE_CARRIER = []
for i in UNITS:
if str(i) == master.chargeCarrierVar.get() and BLOCKS[i]['available_for_charge_carrier'] == 1:
CHARGE_CARRIER.append(i)
MIN_TOTAL = float(self.extracMinTotal.get())
BACKGROUND_WINDOW = int(self.extracBack.get())
S_N_CUTOFF = int(self.extracSnCutoff.get())
master.measurementWindow = 0
top.destroy()
def save(self):
""" This function saves all changed settings to the
settings file.
"""
global CHARGE_CARRIER
CHARGE_CARRIER = []
for i in UNITS:
if str(i) == master.chargeCarrierVar.get() and BLOCKS[i]['available_for_charge_carrier'] == 1:
CHARGE_CARRIER.append(i)
with open(SETTINGS_FILE,'w') as fw:
fw.write("ALIGNMENT_TIME_WINDOW\t"+str(float(self.alignTimeWindow.get()))+"\n")
fw.write("ALIGNMENT_MASS_WINDOW\t"+str(float(self.alignMassWindow.get()))+"\n")
fw.write("ALIGNMENT_S_N_CUTOFF\t"+str(int(self.alignSn.get()))+"\n")
fw.write("ALIGNMENT_MIN_PEAK\t"+str(int(self.alignMin.get()))+"\n")
fw.write("CALIB_MASS_WINDOW\t"+str(float(self.calibMassWindow.get()))+"\n")
fw.write("CALIB_S_N_CUTOFF\t"+str(int(self.calibSn.get()))+"\n")
fw.write("CALIB_MIN_PEAK\t"+str(int(self.calibMin.get()))+"\n")
fw.write("SUM_SPECTRUM_RESOLUTION\t"+str(int(self.sumSpec.get()))+"\n")
fw.write("MASS_WINDOW\t"+str(float(self.extracMassWindow.get()))+"\n")
fw.write("TIME_WINDOW\t"+str(float(self.extracTimeWindow.get()))+"\n")
fw.write("MIN_CHARGE\t"+str(int(self.extracMinCharge.get()))+"\n")
fw.write("MAX_CHARGE\t"+str(int(self.extracMaxCharge.get()))+"\n")
fw.write("CHARGE_CARRIER\t"+str(CHARGE_CARRIER[0])+"\n")
fw.write("MIN_TOTAL\t"+str(float(self.extracMinTotal.get()))+"\n")
fw.write("BACKGROUND_TOTAL\t"+str(int(self.extracBack.get()))+"\n")
fw.write("S_N_CUTOFF\t"+str(int(self.extracSnCutoff.get()))+"\n")
fw.write("EXTRACTION_PADDING\t"+str(int(self.extracPad.get()))+"\n")
master.measurementWindow = 1
top = self.top = tk.Toplevel()
self.chargeCarrierVar = tk.StringVar()
self.chargeCarrierVar.set(CHARGE_CARRIER[0])
options = []
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.alignmentLabel = tk.Label(top, text="Alignment parameters", font="bold")
self.alignmentLabel.grid(row=0, columnspan=2, sticky=tk.W)
self.alignTimeWindowLabel = tk.Label(top, text="Alignment time window")
self.alignTimeWindowLabel.grid(row=1, column=0, sticky=tk.W)
self.alignTimeWindow = tk.Entry(top)
self.alignTimeWindow.insert(0, ALIGNMENT_TIME_WINDOW)
self.alignTimeWindow.grid(row=1, column=1, sticky=tk.W)
self.alignMassWindowLabel = tk.Label(top, text="Alignment m/z window")
self.alignMassWindowLabel.grid(row=2, column=0, sticky=tk.W)
self.alignMassWindow = tk.Entry(top)
self.alignMassWindow.insert(0, ALIGNMENT_MASS_WINDOW)
self.alignMassWindow.grid(row=2, column=1, sticky=tk.W)
self.alignSnLabel = tk.Label(top, text="Minimal S/N for alignment")
self.alignSnLabel.grid(row=3, column=0, sticky=tk.W)
self.alignSn = tk.Entry(top)
self.alignSn.insert(0, ALIGNMENT_S_N_CUTOFF)
self.alignSn.grid(row=3, column=1, sticky=tk.W)
self.alignMinLabel = tk.Label(top, text="Minimal features for alignment")
self.alignMinLabel.grid(row=4, column=0, sticky=tk.W)
self.alignMin = tk.Entry(top)
self.alignMin.insert(0, ALIGNMENT_MIN_PEAK)
self.alignMin.grid(row=4, column=1, sticky=tk.W)
self.calibrationLabel = tk.Label(top, text="Calibration parameters", font="bold")
self.calibrationLabel.grid(row=5, columnspan=2, sticky=tk.W)
self.calibMassWindowLabel = tk.Label(top, text="Calibration mass window")
self.calibMassWindowLabel.grid(row=6, column=0, sticky=tk.W)
self.calibMassWindow = tk.Entry(top)
self.calibMassWindow.insert(0, CALIB_MASS_WINDOW)
self.calibMassWindow.grid(row=6, column=1, sticky=tk.W)
self.calibSnLabel = tk.Label(top, text="Minimal S/N for calibration")
self.calibSnLabel.grid(row=7, column=0, sticky=tk.W)
self.calibSn = tk.Entry(top)
self.calibSn.insert(0, CALIB_S_N_CUTOFF)
self.calibSn.grid(row=7, column=1, sticky=tk.W)
self.calibMinLabel = tk.Label(top, text="Minimal number of calibrants")
self.calibMinLabel.grid(row=8, column=0, sticky=tk.W)
self.calibMin = tk.Entry(top)
self.calibMin.insert(0, CALIB_MIN_PEAK)
self.calibMin.grid(row=8, column=1, sticky=tk.W)
self.extractionLabel = tk.Label(top, text="Extraction parameters", font="bold")
self.extractionLabel.grid(row=9, columnspan=2, sticky=tk.W)
self.sumSpecLabel = tk.Label(top, text="Data points per 1 m/z")
self.sumSpecLabel.grid(row=10, column=0, sticky=tk.W)
self.sumSpec = tk.Entry(top)
self.sumSpec.insert(0, SUM_SPECTRUM_RESOLUTION)
self.sumSpec.grid(row=10, column=1, sticky=tk.W)
self.extracMassWindowLabel = tk.Label(top, text="Extraction m/z window")
self.extracMassWindowLabel.grid(row=12, column=0, sticky=tk.W)
self.extracMassWindow = tk.Entry(top)
self.extracMassWindow.insert(0, MASS_WINDOW)
self.extracMassWindow.grid(row=12, column=1, sticky=tk.W)
self.extracTimeWindowLabel = tk.Label(top, text="Extraction time window")
self.extracTimeWindowLabel.grid(row=13, column=0, sticky=tk.W)
self.extracTimeWindow = tk.Entry(top)
self.extracTimeWindow.insert(0, TIME_WINDOW)
self.extracTimeWindow.grid(row=13, column=1, sticky=tk.W)
self.extracPadLabel = tk.Label(top, text="Extraction window padding")
self.extracPadLabel.grid(row=14, column=0, sticky=tk.W)
self.extracPad = tk.Entry(top)
self.extracPad.insert(0, EXTRACTION_PADDING)
self.extracPad.grid(row=14, column=1, sticky=tk.W)
self.extracMinChargeLabel = tk.Label(top, text="Minimum charge state")
self.extracMinChargeLabel.grid(row=15, column=0, sticky=tk.W)
self.extracMinCharge = tk.Entry(top)
self.extracMinCharge.insert(0, MIN_CHARGE)
self.extracMinCharge.grid(row=15, column=1, sticky=tk.W)
self.extracMaxChargeLabel = tk.Label(top, text="Maximum charge state")
self.extracMaxChargeLabel.grid(row=16, column=0, sticky=tk.W)
self.extracMaxCharge = tk.Entry(top)
self.extracMaxCharge.insert(0, MAX_CHARGE)
self.extracMaxCharge.grid(row=16, column=1, sticky=tk.W)
for i in UNITS:
if BLOCKS[i]['available_for_charge_carrier'] == 1:
options.append(i)
self.chargeCarrierLabel = tk.Label(top, text="Charge carrier")
self.chargeCarrierLabel.grid(row=17, column=0, sticky=tk.W)
self.chargeCarrier = tk.OptionMenu(top, self.chargeCarrierVar, *options)
self.chargeCarrier.grid(row=17, column=1, sticky=tk.W)
self.extracMinTotalLabel = tk.Label(top, text="Minimum isotopic fraction")
self.extracMinTotalLabel.grid(row=18, column=0, sticky=tk.W)
self.extracMinTotal = tk.Entry(top)
self.extracMinTotal.insert(0, MIN_TOTAL)
self.extracMinTotal.grid(row=18, column=1, sticky=tk.W)
self.extracBackLabel = tk.Label(top, text="Background detection window")
self.extracBackLabel.grid(row=19, column=0, sticky=tk.W)
self.extracBack = tk.Entry(top)
self.extracBack.insert(0, BACKGROUND_WINDOW)
self.extracBack.grid(row=19, column=1, sticky=tk.W)
self.extracSnCutoffLabel = tk.Label(top, text="Spectra QC S/N cutoff")
self.extracSnCutoffLabel.grid(row=20, column=0, sticky=tk.W)
self.extracSnCutoff = tk.Entry(top)
self.extracSnCutoff.insert(0, S_N_CUTOFF)
self.extracSnCutoff.grid(row=20,column=1, sticky=tk.W)
self.ok = tk.Button(top,text = 'Ok', command = lambda: close(self))
self.ok.grid(row = 21, column = 0, sticky = tk.W)
self.save = tk.Button(top, text = 'Save', command = lambda: save(self))
self.save.grid(row = 21, column = 1, sticky = tk.E)
# Tooltips
createToolTip(self.alignTimeWindowLabel,"The time window in seconds around the specified time of an alignment feature that LaCyTools is allowed to look for the maximum intensity of each feature.")
createToolTip(self.alignMassWindowLabel,"The m/z window in Thompson around the specified exact m/z of an alignment feature, that LaCyTools will use to find the maximum of each feature.")
createToolTip(self.alignSnLabel,"The minimum S/N of an alignment feature to be included in the alignment.")
createToolTip(self.alignMinLabel,"The minimum number of features that have a S/N higher than the minimum S/N for alignment to occur.")
createToolTip(self.calibMassWindowLabel,"The mass window in Dalton around the specified exact m/z of a calibrant, that LaCyTools uses to determine the uncalibrated accurate mass. This value will be charge state corrected, i.e. for a triple charged analyte the used window will be the value specified here divided by 3.")
createToolTip(self.calibSnLabel,"The minimum S/N of a calibrant to be included in the calibration.")
createToolTip(self.calibMinLabel,"The minimum number of calibrants that have a S/N higher than the minimum S/N for calibration to occur.")
createToolTip(self.sumSpecLabel,"The number of bins per m/z that will be used in the sum spectrum. A value of 100 means that each data point in the sum spectrum is spaced at 0.01 m/z.")
createToolTip(self.extracMassWindowLabel,"The m/z window in Thompson around the specified exact m/z of a feature that LaCyTools will use for quantitation. For example, a value of 0.1 results in LaCyTools quantifying 999.9 to 1000.1 for a feature with an m/z value of 1000.")
createToolTip(self.extracTimeWindowLabel,"The rt window in seconds around the specified elution time of each cluster that contains features for quantitation. For example, a value of 10 will result in LaCyTools creating a sum spectrum from 90 s. to 110 s. for a cluster eluting at 100s.")
createToolTip(self.extracMinChargeLabel,"The minimum charge state that LaCyTools will attempt to use in calibration and quantitation for all features listed in the analyte reference file.")
createToolTip(self.extracMaxChargeLabel,"The maximum charge state that LaCyTools will attempt to use in calibration and quantitation for all features listed in the analyte reference file.")
createToolTip(self.chargeCarrierLabel,"The charge carrier that is applied to all specified analytes for quantitation.")
createToolTip(self.extracPadLabel,"The number of windows before the regular analyte windows that will be examined to determine the IPQ.")
createToolTip(self.extracMinTotalLabel,"The minimum fraction of the theoretical isotopic pattern that LaCyTools will use for quantitation. For example, a value of 0.95 means that LaCyTools will quantify isotopes until the sum of the quantified isotopes exceeds 0.95 of the total theoretcal isotopic pattern.")
createToolTip(self.extracBackLabel,"The mass window in Dalton that LaCyTools is allowed to look for the local background and noise for each analyte. For example, a value of 10 means that LaCyTools will look from 990 m/z to 1010 m/z for an analyte with an m/z of 1000.")
createToolTip(self.extracSnCutoffLabel,"The minimum S/N of an analyte to be included in the spectral QC. Specifically, for the output that lists what fraction of the total quantified analytes passed the here specified S/N value.")
def getSettings(self):
""" This function reads the settings file as specified in the
program, applying them to the program.
"""
with open(SETTINGS_FILE,'r') as fr:
for line in fr:
line = line.rstrip('\n')
chunks = line.split()
if chunks[0] == "ALIGNMENT_TIME_WINDOW":
global ALIGNMENT_TIME_WINDOW
ALIGNMENT_TIME_WINDOW = float(chunks[1])
if chunks[0] == "ALIGNMENT_MASS_WINDOW":
global ALIGNMENT_MASS_WINDOW
ALIGNMENT_MASS_WINDOW = float(chunks[1])
if chunks[0] == "ALIGNMENT_S_N_CUTOFF":
global ALIGNMENT_S_N_CUTOFF
ALIGNMENT_S_N_CUTOFF = int(chunks[1])
if chunks[0] == "ALIGNMENT_MIN_PEAK":
global ALIGNMENT_MIN_PEAK
ALIGNMENT_MIN_PEAK = int(chunks[1])
if chunks[0] == "CALIB_MASS_WINDOW":
global CALIB_MASS_WINDOW
CALIB_MASS_WINDOW = float(chunks[1])
if chunks[0] == "CALIB_S_N_CUTOFF":
global CALIB_S_N_CUTOFF
CALIB_S_N_CUTOFF = int(chunks[1])
if chunks[0] == "CALIB_MIN_PEAK":
global CALIB_MIN_PEAK
CALIB_MIN_PEAK = int(chunks[1])
if chunks[0] == "SUM_SPECTRUM_RESOLUTION":
global SUM_SPECTRUM_RESOLUTION
SUM_SPECTRUM_RESOLUTION = int(chunks[1])
if chunks[0] == "MASS_WINDOW":
global MASS_WINDOW
MASS_WINDOW = float(chunks[1])
if chunks[0] == "TIME_WINDOW":
global TIME_WINDOW
TIME_WINDOW = float(chunks[1])
if chunks[0] == "MIN_CHARGE":
global MIN_CHARGE
MIN_CHARGE = int(chunks[1])
if chunks[0] == "MAX_CHARGE":
global MAX_CHARGE
MAX_CHARGE = int(chunks[1])
if chunks[0] == "MIN_TOTAL":
global MIN_TOTAL
MIN_TOTAL = float(chunks[1])
if chunks[0] == "BACKGROUND_TOTAL":
global BACKGROUND_TOTAL
BACKGROUND_TOTAL = int(chunks[1])
if chunks[0] == "S_N_CUTOFF":
global S_N_CUTOFF
S_N_CUTOFF = int(chunks[1])
def feature_reader(self,file):
""" This reads the contents of the alignmen features file and
stores the relevant values in a list.
INPUT: A filename
OUTPUT: A list of m/z,retention lists (elements are type float)
"""
features = []
with open(file,'r') as fr:
for line in fr:
try:
if line[0][0].isdigit():
line = line.rstrip().split()
features.append([float(x) for x in line])
except IndexError:
print ("Incorrect line observed in: ")+str(file)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tIncorrect line observed in: "+str(analyteFile)+"\n")
except:
print ("Unexpected Error: "), sys.exc_info()[0]
return features
def fitFunc(self, x,a,b,c):
penalty = 0
if b > 2.:
penalty = abs(b-1.)*10000
if b < 0.:
penalty = abs(2.-b)*10000
return a*x**b + c + penalty
def fitFuncLin(self, x,a,b):
return a*x + b
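# Note: fitFunc above is a power law a*x**b + c with a soft penalty that
# steers the optimizer towards exponents b inside [0, 2], while fitFuncLin is
# the plain linear alternative; calcQuadratic below picks one of the two via
# its 'func' argument ("PowerLaw" or "Linear").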
def calcQuadratic(self,data,func):
""" This function fits the specified function in 'fitFunc'
to the data, using the curve_fit package from scipy.optimize.
INPUT: A list of (m/z,int) tuples
OUTPUT: The parameters for the fitted function
"""
expected = []
observed = []
for i in data:
expected.append(i[0])
observed.append(i[1])
try:
if func == "PowerLaw":
z = curve_fit(self.fitFunc, observed, expected)#,maxfev=10000)
elif func == "Linear":
z = curve_fit(self.fitFuncLin,observed,expected)
name = self.inputFile.split(".")[0]
name = os.path.join(self.batchFolder,name)
#############
# Plot Code #
#############
minX = min(expected)-0.1*min(expected)
maxX = max(expected)+0.1*max(expected)
newX = numpy.linspace(minX,maxX,2500*(maxX-minX))
linY = newX
if func == "PowerLaw":
yNew = self.fitFunc(newX,*z[0])
minY = self.fitFunc(minX,*z[0])
maxY = self.fitFunc(maxX,*z[0])
elif func == "Linear":
yNew = self.fitFuncLin(newX,*z[0])
minY = self.fitFuncLin(minX,*z[0])
maxY = self.fitFuncLin(maxX,*z[0])
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
plt.scatter(expected,observed,c='b',label='Raw',alpha=0.5)
observedCalibrated = []
for index, j in enumerate(observed):
if func == "PowerLaw":
observedCalibrated.append(self.fitFunc(j,*z[0]))
elif func == "Linear":
observedCalibrated.append(self.fitFuncLin(j,*z[0]))
plt.scatter(expected,observedCalibrated,c='r',label='Calibrated',marker='s',alpha=0.5)
numbers = ["%.2f" % number for number in z[0]]
if func == "PowerLaw":
if float(numbers[2]) > 0.0:
plt.plot(newX,yNew,label="Fit, Function: "+str(numbers[0])+"x"+"$^{"+str(numbers[1])+"}$+"+str(numbers[2]),c='b')
else:
plt.plot(newX,yNew,label="Fit, Function: "+str(numbers[0])+"x"+"$^{"+str(numbers[1])+"}$"+str(numbers[2]),c='b')
elif func == "Linear":
if float(numbers[1]) > 0.0:
plt.plot(newX,yNew, label="Fit, Function: "+str(numbers[0])+"x+"+str(numbers[1]),c='b')
else:
plt.plot(newX,yNew, label="Fit, Function: "+str(numbers[0])+"x"+str(numbers[1]),c='b')
plt.plot(newX,linY,label='Target',c='r',linestyle='--')
plt.legend(loc='best')
plt.xlabel("Expected rt (s.)")
plt.ylabel("Observed rt (s.)")
plt.xlim(minX,maxX)
plt.ylim(minY,maxY)
fig.savefig(name,dpi=800)
plt.close()
###############
# end of plot #
###############
except RuntimeError:
z = None
return z
def dataPopup(self,master):
""" This function creates a popup window belonging to the HD5
data format. The window has a button where the user has to select
the location of his mzXML files, a checkbox indicating if the
mzXML files can be deleted afterwards and lastly a run button.
INPUT: None
OUTPUT: None
"""
if master.dataWindow == 1:
return
master.dataWindow = 1
self.folder = tk.StringVar()
self.ptFileName = tk.StringVar()
def close(self):
master.dataWindow = 0
top.destroy()
def batchButton():
master.openBatchFolder()
self.folder.set(master.batchFolder)
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.batchDir = tk.Button(top, text = "Batch Directory", width = 25, command = lambda: batchButton())
self.batchDir.grid(row = 0, column = 0, sticky = tk.W)
self.batch = tk.Label(top, textvariable = self.folder, width = 25)
self.batch.grid(row = 0, column = 1)
self.remove = tk.Checkbutton(top, text = "Remove mzXML files", variable = master.rmMZXML, onvalue = 1, offvalue = 0)
self.remove.grid(row = 1, column = 0, sticky = tk.W)
self.convertButton = tk.Button(top, text = "Batch Convert to pyTables", width = 25, command = lambda: master.batchConvert(master))
self.convertButton.grid(row = 2, column = 0,columnspan = 2)
def batchConvert(self,master):
""" TODO: COMMENT THIS FUNCTION PLEASE.
This function does x, using Y
INPUT: stuff
OUTPUT: stuff
"""
import time
start_time = time.time()
filenames = glob.glob(str(self.batchFolder)+"/*" + EXTENSION)
print ("Converting...")
filename = filenames[0]
array = []
self.inputFile = filename
self.readData(array,None)
nscans = len(array)
size = 0
for rt, spectrum in array:
size = max(size, len(spectrum))
SCAN_SIZE = int(size * 1.1)
try:
rawfile = tables.open_file(os.path.join(self.batchFolder, "pytables.h5"), "w", filters=tables.Filters(complevel=4, complib="blosc:lz4"))
except tables.HDF5ExtError:
print ("Error creating pyTables file")
raise
class Scan(tables.IsDescription):
sample = tables.Int64Col(pos=0)
scan = tables.Int64Col(pos=1)
rt = tables.Float64Col(pos=2)
art = tables.Float64Col(pos=3) # aligned retention time
idx = tables.Int64Col(pos=4)
size = tables.Int64Col(pos=5)
rawfile.create_vlarray('/', 'filenames', atom=tables.VLUnicodeAtom(), expectedrows=len(filenames))
rawfile.create_table('/', 'scans', description=Scan, expectedrows=len(filenames)*nscans)
rawfile.create_earray('/', 'mzs', atom=tables.Float64Atom((SCAN_SIZE,)), shape=(0,), chunkshape=(1,))
rawfile.create_earray('/', 'Is', atom=tables.Int64Atom((SCAN_SIZE,)), shape=(0,), chunkshape=(1,))
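# Note on the resulting HDF5 layout: /filenames is a variable-length array of
# source file names, /scans is a table with one row per scan (sample, scan,
# rt, aligned rt, idx, size), and /mzs and /Is are extendable arrays holding
# one fixed-size (SCAN_SIZE) vector of m/z values and of intensities per scan,
# addressed through the 'idx' column of /scans.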
row = rawfile.root.scans.row
idx = 0
# main loop
for count, filename in enumerate(filenames):
self.inputFile = filename
if count >= 1:
array = []
self.readData(array,None)
mzs = numpy.zeros((len(array), SCAN_SIZE), numpy.float64)
Is = numpy.zeros((len(array), SCAN_SIZE), numpy.int64)
# loop over spectra
for scan, spectrum in enumerate(array):
rt, spectrum = spectrum
size = min(len(spectrum), SCAN_SIZE)
spectrum = numpy.array(spectrum).T
spectrum[0, 1:] = numpy.diff(spectrum[0])
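# Note: the m/z axis is stored delta-encoded here (first value absolute, the
# rest as successive differences), presumably to improve compressibility; a
# reader has to restore absolute m/z values with a cumulative sum.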
mzs[scan, :size], Is[scan, :size] = spectrum[:, :size]
row['sample'] = count
row['scan'] = scan
row['rt'] = rt
row['idx'] = idx
row['size'] = size
row.append()
idx += 1
rawfile.root.mzs.append(mzs)
rawfile.root.Is.append(Is)
rawfile.root.filenames.append(filename)
if self.rmMZXML.get() == 1:
try:
os.remove(filename)
except:
raise
rawfile.close()
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tFinished converting\n")
end_time = time.time()
print ("Batch conversion lasted for ", str((end_time - start_time) / 60.), "minutes, or", str((end_time - start_time) / len(filenames)), "seconds per sample.")
messagebox.showinfo("Status Message","Batch Convert finished on "+str(datetime.now()))
def batchProcess(self,master):
""" This is the main controller function for batch processing.
First, the function checks if any reference or alignment file
was selected, producing a message box if this is not the case.
Afterwards, the function checks whether or not it has to read
        from HDF5 (pyTables) files or other accepted file formats. Subsequently,
it performs alignment if an alignment file is selected, followed
by quantitation (and calibration) if a reference list is
selected. Finally, it will combine all the individual results
into a summary file before cleaning up.
INPUT: None
OUTPUT: A summary file
"""
import time
start = time.time()
# Safety feature (prevents batchProcess from being started multiple times)
if self.batchProcessing == 1:
messagebox.showinfo("Error Message", "Batch Process already running")
return
self.batchProcessing = 1
#####################
# PROGRESS BAR CODE #
#####################
self.alPerc = tk.StringVar()
self.extPerc = tk.StringVar()
self.alPerc.set("0%")
self.extPerc.set("0%")
# barWindow = Tk()
barWindow = self.top = tk.Toplevel()
barWindow.title("Progress Bar")
al = tk.Label(barWindow, text="Alignment", padx=25)
al.grid(row=0, column=0, sticky=tk.W)
ft = ttk.Frame(barWindow)
ft.grid(row=1, columnspan=2)
perc1 = tk.Label(barWindow, textvariable=self.alPerc)
perc1.grid(row=0, column=1, padx=25)
progressbar = ttk.Progressbar(ft, length=100, mode='determinate')
progressbar.grid(row=1, columnspan=2)
ext = tk.Label(barWindow, text="Quantitation", padx=25)
ext.grid(row=2, column=0, sticky=tk.W)
ft2 = ttk.Frame(barWindow)
ft2.grid(row=3, columnspan=2)
perc2 = tk.Label(barWindow, textvariable=self.extPerc)
perc2.grid(row=2, column=1, padx=25)
progressbar2 = ttk.Progressbar(ft2, length=100, mode='determinate')
progressbar2.grid(row=3, columnspan=2)
###################
# END OF BAR CODE #
###################
# Check if reference or alignment file was selected
if self.refFile == "" and self.alFile == "" and self.calFile == "":
messagebox.showinfo("File Error","No reference or alignment file selected")
# Check for pytables file
if os.path.isfile(os.path.join(self.batchFolder,"pytables.h5")):
ptFileName = os.path.join(self.batchFolder,"pytables.h5")
if self.ptFile is None:
self.ptFile = tables.open_file(ptFileName, mode='a')
filenames = self.ptFile.root.filenames[:]
self.readData = self.readPTData
self.transform_mzXML = self.alignRTs
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
print ('Found "pytables.h5" in batch folder.')
else:
filenames = glob.glob(os.path.join(str(self.batchFolder),"*"+EXTENSION))
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
# ALIGNMENT
if self.alFile != "":
features = []
features = self.feature_reader(self.alFile)
features = sorted(features, key = lambda tup: tup[1])
# reset aligned rts to 0
if self.ptFile is not None and self.ptFile.isopen:
for scan in self.ptFile.root.scans:
scan['art'] = 0
scan.update()
self.ptFile.flush()
for index,file in enumerate(filenames):
self.alPerc.set(str(int( (float(index) / float(len(filenames) ) ) *100))+"%")
progressbar["value"] = int( (float(index) / float(len(filenames) ) ) *100)
progressbar.update()
array = []
timePairs = []
self.inputFile = file
self.inputFileIdx = filenames2idx[file]
readTimes = self.matchFeatureTimes(features)
self.readData(array,readTimes)
strippedFeatures = []
for i in features:
peakTime = 0
peakIntensity = 0
dataPoints = []
leftPoints = []
rightPoints = []
signalPoints = []
for j in array:
if j[0] > i[1] - 2*ALIGNMENT_TIME_WINDOW and j[0] < i[1] - ALIGNMENT_TIME_WINDOW:
dataPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
leftPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if j[0] > i[1] + ALIGNMENT_TIME_WINDOW and j[0] < i[1] + 2*ALIGNMENT_TIME_WINDOW:
dataPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
rightPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if j[0] < i[1] + ALIGNMENT_TIME_WINDOW and j[0] > i[1] - ALIGNMENT_TIME_WINDOW:
signalPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW) > peakIntensity:
peakIntensity = self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW)
peakTime = j[0]
                    ####################################################################
                    # Iterative method to determine the background and noise in an EIC #
                    ####################################################################
sortedData = sorted(dataPoints)
startSize = int(0.25 * float(len(sortedData)))
currSize = startSize
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
directionFlag = 0
for k in range(0,len(sortedData)-(startSize+1)):
if sortedData[currSize+1] < currAverage + 3 * currNoise:
                            directionFlag = 1
currSize += 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
if sortedData[currSize-1] > currAverage + 3 * currNoise and directionFlag == 0:
currSize -= 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
break
background = currAverage
noise = currNoise
                    #############################################
                    # End of background and noise determination  #
                    #############################################
# Plot Code #
#############
"""plotPoints = leftPoints + signalPoints + rightPoints
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sorted(plotPoints))
#plt.plot(plotPoints)
plt.axhline(y=currAverage, color ='k')
plt.axhline(y=currAverage + 3* currNoise, color = 'r')
#plt.axvline(x=len(plotPoints)/3, color = 'r')
#plt.axvline(x=(len(plotPoints)/3)*2, color = 'r')
plt.show()"""
###############
# end of plot #
###############
if peakIntensity > background + ALIGNMENT_S_N_CUTOFF * noise:
timePairs.append((i[1],peakTime))
strippedFeatures.append(i)
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+"\tFeature: "+str(i)+" was not above alignment S/N cutoff: "+str(ALIGNMENT_S_N_CUTOFF)+" in file: "+str(file)+"\n")
# Make sure that enough features are used for alignment
if len(timePairs) >= ALIGNMENT_MIN_PEAK:
warnUser = False
# Attempt advanced alignment (PowerLaw)
alignFunction = self.calcQuadratic(timePairs,"PowerLaw")
# Fall back to basic alignment (Linear)
if alignFunction == None:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tAdvanced alignment failed on file: "+str(file)+", switching to basic alignment\n")
alignFunction = self.calcQuadratic(timePairs,"Linear")
if alignFunction == None:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+"\tFile: "+str(file)+" could not be aligned. Both advanced and basic alignment fits failed\n")
outFile = os.path.split(file)[-1]
outFile = "unaligned_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
open(outFile,'w').close()
continue
# Bind correct fit function to fit (PowerLaw or Linear)
if len(alignFunction[0]) == 3:
fit = self.fitFunc
elif len(alignFunction[0]) == 2:
fit = self.fitFuncLin
# Create alignment output file
alignmentOutput = self.inputFile.split(".")[0]
alignmentOutput = alignmentOutput + ".alignment"
with open(alignmentOutput,'w') as falign:
lsq = 0
falign.write("Peak\tExpected RT\tOriginal RT\tAligned RT\n")
for index,timePair in enumerate(timePairs):
falign.write(str(strippedFeatures[index][0])+"\t"+str(timePair[0])+"\t"+str(timePair[1])+"\t"+str(fit(float(timePair[1]),*alignFunction[0]))+"\n")
lsq += float(strippedFeatures[index][0]) - fit(float(timePair[1]),*alignFunction[0])
self.transform_mzXML(file,fit,alignFunction[0])
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tFile not aligned due to lack of features\n")
outFile = os.path.split(file)[-1]
outFile = "unaligned_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
open(outFile,'w').close()
self.alPerc.set("100%")
progressbar["value"] = 100
# (CALIBRATION AND) EXTRACTION
if self.refFile != "":
if self.analyteIntensity.get() == 0 and self.analyteRelIntensity.get() == 0 and self.analyteBackground.get() == 0 and self.analyteNoise.get() == 0 and self.alignmentQC.get() == 0 and self.qualityControl.get() == 0 and self.spectraQualityControl.get() == 0:
messagebox.showinfo("Output Error","No outputs selected")
self.initCompositionMasses(self.refFile)
ref = []
self.refParser(ref)
times = []
for i in ref:
times.append((i[4],i[5]))
chunks = collections.OrderedDict()
for i in times:
if i not in chunks.keys():
chunks['%s' % '-'.join(i)] = []
for i in ref:
chunks['%s' % '-'.join((i[4],i[5]))].append(i)
if os.path.isfile(os.path.join(self.batchFolder,"pytables.h5")) == False:
filenames = glob.glob(os.path.join(str(self.batchFolder),EXTRACTION+"*"+EXTENSION))
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
for index,file in enumerate(filenames):
self.extPerc.set(str(int( (float(index) / float(len(filenames) ) ) *100))+"%")
progressbar2["value"] = int( (float(index) / float(len(filenames) ) ) *100)
progressbar2.update()
results = []
self.inputFile = file
self.inputFileIdx = filenames2idx[file]
array = []
readTimes = self.matchAnalyteTimes(ref)
self.readData(array, readTimes)
for index,i in enumerate(chunks.keys()):
spectrum = self.sumSpectrum(i,array)
                    # Strip the time window from the chunk key, leaving only the retention time
rt = tuple(i.split('-'))[0]
calibrants = []
# Calibrate the sum spectrum
if self.calFile.get() == 1:
for j in ref:
if j[6] == "True" and int(round(float(j[4]))) == int(round(float(rt))):
charge = j[0].split("_")[-2]
calibrants.append((float(j[1]),int(charge)))
measuredMaxima = self.getLocalMaxima(calibrants,spectrum)
presentCalibrants = self.getObservedCalibrants(measuredMaxima,calibrants)
measuredMaximaMZ = []
# Strip the m/z values from the maxima
for j in measuredMaxima:
measuredMaximaMZ.append(j[0])
# Perform 2d degree polynomial fit
if len(measuredMaximaMZ) >= CALIB_MIN_PEAK:
z = numpy.polyfit(measuredMaximaMZ,presentCalibrants,2) # This should be the correct one
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tUnable to calibrate the sum spectrum at "+str(i)+" seconds\n")
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "Uncalibrated_sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
continue
f = numpy.poly1d(z)
calOut = str(file.split(".")[0])+"_"+str(index)+".calibration"
with open(calOut,'w') as fw2:
for index,j in enumerate(measuredMaximaMZ):
fw2.write("accurate mass: "+str(presentCalibrants[index])+" measured at "+str(j) +" being calibrated to: "+str(f(j))+"\n")
mzList = []
intList = []
for j in spectrum:
mzList.append(float(j[0]))
intList.append(int(j[1]))
# Transform python list into numpy array
mzArray = numpy.array(mzList)
newArray = f(mzArray)
newSpectrum = []
for index,j in enumerate(newArray):
newSpectrum.append((j,intList[index]))
spectrum = newSpectrum
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
else:
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
self.extractData(chunks[i],spectrum,results)
self.writeResults(results,file)
# Wrap up stuff
self.extPerc.set("100%")
progressbar2["value"] = 100
barWindow.destroy()
self.combineResults()
if self.ptFile is not None:
self.ptFile.close()
self.batchProcessing = 0
end = time.time()
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tBatch process lasted for "+str((end - start) / 60.)+"minutes\n")
messagebox.showinfo("Status Message","Batch Process finished on "+str(datetime.now()))
def writeCalibration(self,function,array):
""" This function creates a calibrated mzXML file. However, the
function is currently not being used and might be removed in the
future.
INPUT: Calibration function and the raw data in an array
OUTPUT: A calibrated mzXML file
"""
endian = "!"
started = False
with open(self.inputFile,'r') as fr:
name = os.path.split(str(self.inputFile))[-1]
name = name.split(".")[0]
name = "calibrated_"+name+".mzXML" # TODO: Make the extension dynamic
with open(name,'w') as fw:
counter = 0
mzList = []
intList = []
values = []
for line in fr:
if 'zlib' in line:
fw.write(line)
compression = True
elif 'byteOrder' in line:
fw.write(line)
byteOrder = line.split("byteOrder")[1]
byteOrder = byteOrder.split("\"")[1]
endian = "!"
if byteOrder == 'little':
endian = '<'
elif byteOrder == 'big':
endian = '>'
elif 'precision' in line:
fw.write(line)
precision = line.split("precision")[1]
precision = precision.split("\"")[1]
if int(precision) == 64:
precision = 'd'
else:
precision = 'f'
elif 'contentType="m/z-int">' in line:
mzList = []
intList = []
values = []
for i in array[counter][1]:
mzList.append(i[0])
intList.append(i[1])
mzArray = numpy.array(mzList)
newArray = function(mzArray)
for index, i in enumerate(newArray):
values.append(i)
values.append(intList[index])
format = str(endian)+str(len(values))+precision
data = struct.pack(format, *values)
if compression == True:
data = zlib.compress(data)
data = base64.b64encode(data)
fw.write('contentType="m/z-int">'+str(data)+'</peaks>\n')
counter += 1
else:
fw.write(line)
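    # Illustrative sketch (not part of LaCyTools): decoding an mzXML <peaks>
    # payload, i.e. the inverse of the packing performed in writeCalibration.
    # The endianness, precision and compression flags are assumed to have been
    # parsed from the surrounding XML, as in the function above.
    """
    import base64
    import struct
    import zlib

    def decode_peaks(payload, endian="!", precision="f", compressed=True):
        data = base64.b64decode(payload)
        if compressed:
            data = zlib.decompress(data)
        count = len(data) // struct.calcsize(precision)
        values = struct.unpack(str(endian) + str(count) + precision, data)
        # Values alternate between m/z and intensity
        return list(zip(values[0::2], values[1::2]))
    """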
def getObservedCalibrants(self,maxima,potentialCalibrants):
""" This function compares the list of local maxima with the
        expected calibrants. The observed local maximum that lies closest
        to a desired calibrant is taken as the m/z at which that calibrant
        was observed in the spectrum. The function then appends the
        theoretical m/z values of the calibrants that were actually
        observed to a list (actualCalibrants), which is returned at the
        end of the function.
        INPUT 1: A list of floats containing the observed local maxima (of
        the spline fit within each inclusion range, assuming that they
        were above the user specified S/N cut off).
        INPUT 2: A list of floats containing the theoretical m/z of all
        calibrants.
        OUTPUT: A list of floats containing the theoretical m/z of the
        calibrants which were near an observed local maximum.
"""
actualCalibrants = []
for i in maxima:
diff = 4.0
closest = 0
for j in potentialCalibrants:
if abs(float(j[0])-float(i[0])) < diff:
diff = abs(float(j[0])-float(i[0]))
closest = float(j[0])
actualCalibrants.append(closest)
return actualCalibrants
def getLocalMaxima(self,features,spectrum):
""" This function takes a list of potential calibrants and will
identify the m/z value that shows the maximum intensity. The
        function will determine the accurate mass from an interpolated
univariate spline that is fitted through the data points,
yielding improved post calibration mass accuracy.
INPUT: A spectrum and a list of features (mass,charge)
        OUTPUT: A list containing (accurate mass, intensity) tuples for the
calibrants that passed the user specified S/N cutoff.
"""
maxima = []
for i in features:
mass, charge = i
window = CALIB_MASS_WINDOW / charge
lowMz = self.binarySearch(spectrum,float(mass)-float(window),len(spectrum)-1,'left')
highMz = self.binarySearch(spectrum,float(mass)+float(window),len(spectrum)-1,'right')
x_points = []
y_points = []
for j in spectrum[lowMz:highMz]:
x_points.append(j[0])
y_points.append(j[1])
newX = numpy.linspace(x_points[0],x_points[-1],2500*(x_points[-1]-x_points[0]))
maximum = (newX[int(len(newX)/2)],0)
try:
f = InterpolatedUnivariateSpline(x_points,y_points)
ySPLINE = f(newX)
for index, j in enumerate(ySPLINE):
if j > maximum[1]:
maximum = (newX[index],j)
except ValueError:
data = zip(x_points,y_points)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tGuassian Curve Fit failed for analyte: "+str(i[0])+", reverting to non fitted local maximum\n")
for j in data:
if j[1] > maximum[1]:
maximum = (j[0],j[1])
except:
print ("Analyte: "+str(i[0])+" is being troublesome, kill it")
# Plot Code (for testing purposes)
"""fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x_points, y_points, 'b*')
#plt.plot(newX,newY, 'b--')
#plt.plot(newX,ySPLINE,'r--')
plt.plot(newX,yINTER,'r--')
plt.plot(newX,yUNIVAR,'g--')
#plt.legend(['Raw Data','Guassian (All Points)','Cubic Spline'], loc='best')
#plt.legend(['Raw Data','Cubic Spline'], loc='best')
plt.legend(['Raw Data','Interp1d','Univariate Spline'], loc='best')
plt.show()"""
# Check if maxima above S/N cut-off
values = self.getBackground(spectrum, maximum[0], charge, window)
background,noise = values[0], values[2]
if maximum[1] > background + CALIB_S_N_CUTOFF * noise:
maxima.append(maximum)
return maxima
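    # Illustrative sketch (not part of LaCyTools): the spline-based apex
    # refinement used in getLocalMaxima, reduced to a stand-alone helper that
    # falls back to the raw maximum when the spline fit fails.
    """
    import numpy
    from scipy.interpolate import InterpolatedUnivariateSpline

    def refine_apex(x_points, y_points, oversampling=2500):
        # Oversample the m/z window and take the maximum of the fitted spline
        new_x = numpy.linspace(x_points[0], x_points[-1],
                               int(oversampling * (x_points[-1] - x_points[0])))
        try:
            spline = InterpolatedUnivariateSpline(x_points, y_points)
            new_y = spline(new_x)
            best = int(numpy.argmax(new_y))
            return new_x[best], float(new_y[best])
        except ValueError:
            best = int(numpy.argmax(y_points))
            return x_points[best], y_points[best]
    """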
def readCalibrationFeatures(self):
""" This function reads the calibration file and returns the
        features in a tuple containing the lower time and upper time values
followed by a list of the m/z coordinates
INPUT: None
        OUTPUT: Tuple containing (lowTime, highTime, [m/z coordinates])
"""
with open(self.calFile,'r') as fr:
firstLine = fr.readline()
lowTime, highTime = firstLine.strip().split("\t")
mz = []
for line in fr:
mz.append(float(line.strip()))
return (float(lowTime), float(highTime), mz)
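    # Illustrative sketch (not part of LaCyTools): the calibration file layout
    # that readCalibrationFeatures expects, with placeholder values. The first
    # line holds the lower and upper time separated by a tab; every following
    # line holds a single m/z value.
    """
    600.0\t660.0
    1485.53
    2039.74
    2593.95
    """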
def sumSpectrum(self,time,array):
""" This function creates a summed spectrum and returns the
resulting spectrum back to the calling function.
        INPUT: The retention time and time window, encoded as a single
        'time-window' string, and an array containing the entire measurement
OUTPUT: A sum spectrum in array form (m/z, intensity)
"""
time = tuple(time.split('-'))
        # Note: binarySearch may return None when the requested time window falls outside the measurement
lowTime = self.binarySearch(array,float(time[0])-float(time[1]),len(array)-1,'left')
highTime = self.binarySearch(array,float(time[0])+float(time[1]),len(array)-1,'right')
LOW_MZ = 25000.0
HIGH_MZ = 0.0
for i in array[lowTime:highTime]:
if i[1][0][0] < LOW_MZ:
LOW_MZ = i[1][0][0]
if i[1][-1][0] > HIGH_MZ:
HIGH_MZ = i[1][-1][0]
# This should be dynamically determined
arraySize = (float(HIGH_MZ) - float(LOW_MZ)) * float(SUM_SPECTRUM_RESOLUTION)
combinedSpectra = numpy.zeros(shape=(int(arraySize+2),2))
bins = []
for index, i in enumerate(combinedSpectra):
i[0] = float(LOW_MZ) + index*(float(1)/float(SUM_SPECTRUM_RESOLUTION))
bins.append(float(LOW_MZ) + index*(float(1)/float(SUM_SPECTRUM_RESOLUTION)))
fullSet = []
mz = []
start = datetime.now()
for i in array[lowTime:highTime]:
for j in i[1]:
fullSet.append(j)
mz.append(j[0])
fullSet.sort(key = lambda tup: tup[0])
mz.sort()
mzArray = numpy.asarray(mz)
binsArray = numpy.asarray(bins)
test = numpy.searchsorted(binsArray,mzArray)
for index, i in enumerate(fullSet):
try:
combinedSpectra[test[index]][1] += i[1]
except:
# We ignore the data points at m/z edge, if they are important
# then the user should do a proper measurement.
pass
#from scipy.signal import savgol_filter
#new = savgol_filter(combinedSpectra,21,3)
#return new
return combinedSpectra
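    # Illustrative sketch (not part of LaCyTools): the searchsorted-based
    # binning that sumSpectrum uses to merge individual scans onto a common
    # m/z axis, written as a stand-alone helper with a placeholder resolution.
    """
    import numpy

    def bin_spectrum(peaks, low_mz, high_mz, resolution=100):
        # One bin per 1/resolution m/z; intensities of peaks that fall into
        # the same bin are summed.
        n_bins = int((high_mz - low_mz) * resolution) + 2
        binned = numpy.zeros((n_bins, 2))
        binned[:, 0] = low_mz + numpy.arange(n_bins) / float(resolution)
        mz = numpy.array([peak[0] for peak in peaks])
        intensity = numpy.array([peak[1] for peak in peaks])
        indices = numpy.searchsorted(binned[:, 0], mz)
        for idx, value in zip(indices, intensity):
            if idx < n_bins:
                binned[idx, 1] += value
        return binned
    """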
def findNearest(self,array,value):
""" A depracated function, will most likely be removed in the
near future.
"""
if value >= array[0][0] and value <= array[-1][0]:
diff = 1
# First Pass
a = 0
b = len(array)
while a < b:
mid = (a+b)//2
if array[mid][0] > value:
b = mid
else:
a = mid+1
if array[a][0] - value < diff:
diff = array[a][0] - value
index = a
# Second Pass
a = 0
b = len(array)
while a < b:
mid = (a+b)//2
if array[mid][0] < value:
a=mid+1
else:
b=mid
if array[a][0] - value < diff:
diff = array[a][0] - value
index = a
return a
def transform_mzXML(self,file,fit,alignFunction):
"""Reads the mzXML file and transforms the reported retention
time by the specified polynomial function.
INPUT: A filename, alignment function and fitting model
OUTPUT: An aligned mzXML file
"""
with open(file,'r') as fr:
outFile = os.path.split(file)[-1]
            # The output prefix indicates which alignment fit was used:
            # "aligned_" for the advanced (PowerLaw) fit, "alignedLin_" for the basic (Linear) fit
if len(alignFunction) == 3:
outFile = "aligned_"+outFile
elif len(alignFunction) == 2:
outFile = "alignedLin_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tWriting output file: "+outFile+"\n")
with open(outFile,'w') as fw:
for line in fr:
if 'retentionTime' in line:
time = line.strip()
time = time.split("\"")
for index,i in enumerate(time):
if 'retentionTime' in i:
time=time[index+1]
break
if time[0] == 'P':
time = time[2:-1]
# The below line is only to make it work with mzMine
if fit(float(time),*alignFunction) < 0:
newTime = str(0)
else:
newTime = str(fit(float(time),*alignFunction))
line = line.replace(time,newTime)
fw.write(line)
else:
fw.write(line)
def alignRTs(self,file,polynomial):
"""Reads the mzXML file and transforms the reported retention
time by the specified polynomial function.
INPUT: A filename and the alignment function
OUTPUT: An aligned mzXML file
"""
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tAligning file: "+str(self.inputFile)+"\n")
i = self.inputFileIdx
for row in self.ptFile.root.scans.where("sample == i"):
time = row['rt']
# The below line is only to make it work with mzMine
if self.fitFunc(time,*polynomial) > 0:
row['art'] = self.fitFunc(time,*polynomial)
row.update()
self.ptFile.flush()
def feature_finder(self,data,lowMass,highMass):
        """ This function returns the maximum intensity found among the
        data points that fall within the given m/z window.
        INPUT: A spectrum and the lower and upper m/z boundaries
        OUTPUT: The maximum intensity within the window
        """
intensity = 0
start = self.binarySearch(data,lowMass,len(data)-1,'left')
end = self.binarySearch(data,highMass,len(data)-1,'right')
for i in data[start:end]:
if i[1] > intensity:
intensity = i[1]
return intensity
def createHeader(self, compositions, reference, chargestate=None):
"""Creates a generic header for both combined and separate
charge states. The function uses the initial reference list,
the extracted compositions and the optional chargestate.
        INPUT 1: A list of tuples (analyte composition, analyte
                 retention time, analyte time window)
INPUT 2: A list of analyte reference tuples (analyte, m/z,
relative area, m/z window, rt, rt window and
calibration)
INPUT 3: An integer
OUTPUT: A string containing the header for the final summary
"""
header = ""
for i in compositions:
header += "\t"+str(i[0])
header += "\n"
# List of theoretical areas
header += "Fraction"
for i in compositions:
sumInt = 0.
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
time = j[4]
timewindow = j[5]
if chargestate == None:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]):
sumInt += float(j[2])
else:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and int(chargestate) == int(charge):
sumInt += float(j[2])
header += "\t"
if sumInt > 0.:
header += str(sumInt)
header += "\n"
# List of monoisotopic masses
header += "Monoisotopic Mass"
for i in compositions:
masses = []
current_relative_intensity = 0.
# Retrieve highest relative intensity
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
relative_intensity = float(j[2])
time = j[4]
timewindow = j[5]
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity >= current_relative_intensity:
current_relative_intensity = relative_intensity
# Get actual data
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
relative_intensity = float(j[2])
time = j[4]
timewindow = j[5]
if chargestate == None:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity == current_relative_intensity:
masses.append(float(j[1]))
else:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity == current_relative_intensity and int(chargestate) == int(charge):
masses.append(float(j[1]))
if masses:
masses = "["+", ".join(map(str, masses))+"]"
else:
masses = ""
try:
header += "\t"+masses
except TypeError:
header += "\tNA"
header += "\n"
return header
def combineResults(self):
""" This function reads all the raw files and creates the summary
output file.
INPUT: None
OUTPUT: A summary file
"""
total = []
ref = []
self.refParser(ref)
for file in glob.glob(os.path.join(str(self.batchFolder),"*.raw")):
compositions = []
trigger = 0
results = []
with open(file,'r') as fr:
name = str(file)
name = os.path.split(str(name))[-1]
current = None
for _ in range(2):
next(fr)
for line in fr:
if not line:
break
line = line.strip().split("\t")
if current:
if current.composition == line[0] and current.time == line[5] and current.timeWindow == line[8]:
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
else:
results.append(current)
current = Analyte()
current.composition = line[0]
current.time = line[5]
current.timeWindow = line[8]
current.massWindow = line[7]
current.isotopes = []
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
else:
current = Analyte()
current.composition = line[0]
current.time = line[5]
current.timeWindow = line[8]
current.massWindow = line[7]
current.isotopes = []
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
results.append(current)
total.append((name,results))
for file in glob.glob(str(self.batchFolder)+"/unaligned*"+EXTENSION):
name = str(file)
name = os.path.split(str(name))[-1]
total.append((name,[]))
total.sort()
# Test chunk to see if class conversion worked
"""for i in total:
for j in i[1]:
print j.composition
if j.composition == "IgGI1H3N4F1":
for k in j.isotopes:
print k.isotope, k.charge, k.obsInt"""
#################################
# Generate the summaryFile name #
#################################
utc_datetime = datetime.utcnow()
s = utc_datetime.strftime("%Y-%m-%d-%H%MZ")
filename = s +"_"+OUTPUT
summaryFile = os.path.join(self.batchFolder,filename)
####################################################################
# Get list of analytes, required for correct alignment of clusters #
####################################################################
compositions = []
with open(self.refFile,'r') as fr:
for line in fr:
if line[0] == "#":
continue
parts=line.rstrip('\n').split('\t')
if not parts[3]:
parts[3] = TIME_WINDOW
compositions.append((parts[0],parts[1],parts[3]))
#############################
# Start writing the results #
#############################
with open(summaryFile,'w') as fw:
##############
# Parameters #
##############
fw.write("Parameter Settings\n")
fw.write("LaCyTools Version\t"+str(self.version)+"\n")
fw.write("LaCyTools Build\t"+str(self.build)+"\n")
if self.alFile != "":
fw.write("Alignment Parameters\n")
fw.write("ALIGNMENT_TIME_WINDOW\t"+str(ALIGNMENT_TIME_WINDOW)+"\n")
fw.write("ALIGNMENT_MASS_WINDOW\t"+str(ALIGNMENT_MASS_WINDOW)+"\n")
fw.write("ALIGNMENT_S_N_CUTOFF\t"+str(ALIGNMENT_S_N_CUTOFF)+"\n")
fw.write("ALIGNMENT_MIN_PEAK\t"+str(ALIGNMENT_MIN_PEAK)+"\n")
if self.calFile.get() == 1:
fw.write("Calibration Parameters\n")
fw.write("CALIB_MASS_WINDOW\t"+str(CALIB_MASS_WINDOW)+"\n")
fw.write("CALIB_S_N_CUTOFF\t"+str(CALIB_S_N_CUTOFF)+"\n")
fw.write("CALIB_MIN_PEAK\t"+str(CALIB_MIN_PEAK)+"\n")
if self.refFile != "":
fw.write("Extraction Parameters\n")
fw.write("SUM_SPECTRUM_RESOLUTION\t"+str(SUM_SPECTRUM_RESOLUTION)+"\n")
fw.write("MASS_WINDOW\t"+str(MASS_WINDOW)+"\n")
fw.write("TIME_WINDOW\t"+str(TIME_WINDOW)+"\n")
fw.write("MIN_CHARGE\t"+str(MIN_CHARGE)+"\n")
fw.write("MAX_CHARGE\t"+str(MAX_CHARGE)+"\n")
fw.write("MIN_TOTAL\t"+str(MIN_TOTAL)+"\n")
fw.write("BACKGROUND_WINDOW\t"+str(BACKGROUND_WINDOW)+"\n\n")
##############################
# Analyte Absolute Intensity #
##############################
if self.analyteIntensity.get() == 1 and self.analyteBckSub.get() == 0:
##########################
# Combined charge states #
##########################
if self.analytePerCharge.get() == 0:
# Header
header = "Absolute Intensity"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Absolute Intensity ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
######################################################
# Analyte Absolute Intensity (Background subtracted) #
######################################################
if self.analyteIntensity.get() == 1 and self.analyteBckSub.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Absolute Intensity (Background Subtracted)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt - l.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Absolute Intensity (Background Subtracted, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt - m.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################################################
# Analyte Relative Intensity (Total Normalization) #
####################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 0 and self.normalizeCluster.get() == 0:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
totalIntensity = 1
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
totalIntensity += max(0, l.obsInt)
except AttributeError:
pass
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity ("+str(i)+"+)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
totalIntensity = 1
for k in compositions:
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
totalIntensity += max(0, m.obsInt)
except AttributeError:
pass
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
######################################################
# Analyte Relative Intensity (Cluster Normalization) #
################################################################################################
# TODO: Check if this can not be simplified now that we have timewindow in compositions as [2] #
################################################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 0 and self.normalizeCluster.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Cluster Normalization)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual Data
clusters = []
for i in total:
for j in compositions:
for k in i[1]:
try:
currentCluster = "-".join((k.time, k.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for i in total:
clusterValues = []
fw.write(str(i[0]))
for j in clusters:
clusterTime = float(j.split("-")[0])
clusterWindow = float(j.split("-")[1])
totalIntensity = 1
for k in compositions:
for l in i[1]:
try:
if l.composition == k[0] and float(l.time) == clusterTime and float(l.timeWindow) == clusterWindow and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
for m in l.isotopes:
totalIntensity += max(0, m.obsInt)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for j in compositions:
flag = 0
sumInt = 0
for k in i[1]:
for l in clusterValues:
try:
if k.composition == j[0] and float(k.time) == l[0] and float(k.timeWindow) == l[1] and float(j[1]) == float(k.time) and float(j[2]) == float(k.timeWindow):
flag = 1
for m in k.isotopes:
sumInt += max(0, m.obsInt)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(l[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Cluster Normalization, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
clusters = []
for j in total:
for k in compositions:
for l in j[1]:
try:
currentCluster = "-".join((l.time, l.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for j in total:
clusterValues = []
fw.write(str(j[0]))
for k in clusters:
clusterTime = float(k.split("-")[0])
clusterWindow = float(k.split("-")[1])
totalIntensity = 1
for l in compositions:
for m in j[1]:
try:
if m.composition == l[0] and float(m.time) == clusterTime and float(m.timeWindow) == clusterWindow and float(l[1]) == float(m.time) and float(l[2]) == float(m.timeWindow):
for n in m.isotopes:
if int(n.charge) == i:
totalIntensity += max(0, n.obsInt)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for k in compositions:
flag = 0
sumInt = 0
for l in j[1]:
for m in clusterValues:
try:
if l.composition == k[0] and float(l.time) == m[0] and float(l.timeWindow) == m[1] and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
flag = 1
for n in l.isotopes:
if int(n.charge) == i:
sumInt += max(0, n.obsInt)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(m[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
##################################################################
# Background Subtracted Relative Intensity (Total Normalization) #
##################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 1 and self.normalizeCluster.get() == 0:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Background Subtracted)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
totalIntensity = 1
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
totalIntensity += max(0, l.obsInt - l.background)
except AttributeError:
pass
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt - l.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Background Subtracted, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
totalIntensity = 1
for k in compositions:
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
totalIntensity += max(0, m.obsInt - m.background)
except AttributeError:
pass
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt - m.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#####################################################################
# Background Subtracted Relative Intensity (Cluster Normalization) #
#####################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 1 and self.normalizeCluster.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Background Subtracted, Cluster Normalization)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual Data
clusters = []
for i in total:
for j in compositions:
for k in i[1]:
try:
currentCluster = "-".join((k.time, k.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for i in total:
fw.write(str(i[0]))
clusterValues = []
for j in clusters:
clusterTime = float(j.split("-")[0])
clusterWindow = float(j.split("-")[1])
totalIntensity = 1
for k in compositions:
for l in i[1]:
try:
if l.composition == k[0] and float(l.time) == clusterTime and float(l.timeWindow) == clusterWindow and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
for m in l.isotopes:
totalIntensity += max(0, m.obsInt - m.background)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for j in compositions:
flag = 0
sumInt = 0
for k in i[1]:
for l in clusterValues:
try:
if k.composition == j[0] and float(k.time) == l[0] and float(k.timeWindow) == l[1] and float(j[1]) == float(k.time) and float(j[2]) == float(k.timeWindow):
flag = 1
for m in k.isotopes:
sumInt += max(0, m.obsInt - m.background)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(l[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Background Subtracted, Cluster Normalization, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
clusters = []
for j in total:
for k in compositions:
for l in j[1]:
try:
currentCluster = "-".join((l.time, l.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for j in total:
clusterValues = []
fw.write(str(j[0]))
for k in clusters:
clusterTime = float(k.split("-")[0])
clusterWindow = float(k.split("-")[1])
totalIntensity = 1
for l in compositions:
for m in j[1]:
try:
if m.composition == l[0] and float(m.time) == clusterTime and float(m.timeWindow) == clusterWindow and float(l[1]) == float(m.time) and float(l[2]) == float(m.timeWindow):
for n in m.isotopes:
if int(n.charge) == i:
totalIntensity += max(0, n.obsInt - n.background)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for k in compositions:
flag = 0
sumInt = 0
for l in j[1]:
for m in clusterValues:
try:
if l.composition == k[0] and float(l.time) == m[0] and float(l.timeWindow) == m[1] and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
flag = 1
for n in l.isotopes:
if int(n.charge) == i:
sumInt += max(0, n.obsInt - n.background)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(m[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
################################
# Analyte Background Intensity #
################################
if self.analyteBackground.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Background"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += l.background
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Background ("+str(i)+"+)"+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += m.background
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
#######################
# Analyte Noise Value #
#######################
if self.analyteNoise.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Noise"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += l.noise
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Noise ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += m.noise
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
#######################
# Alignment Residuals #
#######################
if self.alignmentQC.get() == 1:
# Get results
totalResults = []
for file in glob.glob(os.path.join(str(self.batchFolder),"*.alignment")):
resultBuffer = []
with open (file,'r') as fr:
for line in fr:
line = line.strip().split()
resultBuffer.append(line)
totalResults.append((file,resultBuffer))
# Header
header = []
for i in totalResults:
if len(i[1]) > len(header):
header = i[1][:]
fw.write("Alignment Features Residual")
for i in header[1:]:
fw.write("\t"+str(i[0]))
fw.write("\tRMS\n")
# Actual Data
for i in totalResults:
RMS = 0
fw.write(str(i[0]))
for j in header[1:]:
flag = 0
for k in i[1]:
if j[0] == k[0]:
fw.write("\t"+str(float(k[3])-float(k[1])))
RMS += (float(k[3])-float(k[1]))**2
flag = 1
if flag == 0:
fw.write("\t")
fw.write("\t"+str(math.sqrt(RMS))+"\n")
fw.write("\n")
#####################################
# Alignment Features Retention Time #
#####################################
if self.alignmentQC.get() == 1:
# Get results
totalResults = []
for file in glob.glob(os.path.join(str(self.batchFolder),"*.alignment")):
resultBuffer = []
with open (file,'r') as fr:
for line in fr:
line = line.strip().split()
resultBuffer.append(line)
totalResults.append((file,resultBuffer))
# Header
header = []
for i in totalResults:
if len(i[1]) > len(header):
header = i[1][:]
fw.write("Alignment Features Retention Time")
                for i in header[1:]:
                    fw.write("\t"+str(i[0]))
                fw.write("\n")
                # Actual Data
for i in totalResults:
fw.write(str(i[0]))
for j in header[1:]:
flag = 0
for k in i[1]:
if j[0] == k[0]:
fw.write("\t"+str(float(k[3])))
flag = 1
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
##################################
# Analyte Mass Accuracy (in PPM) #
##################################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Mass Accuracy [ppm] ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual Data
for j in total:
fw.write(str(j[0]))
for k in compositions:
relContribution = 0.0
targetMass = 0.0
actualMass = 0.0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if m.expInt > relContribution and int(m.charge) == i:
relContribution = m.expInt
targetMass = m.mass
actualMass = m.measMass
except AttributeError:
pass
try:
ppm = ((actualMass - targetMass) / targetMass) * 1000000
fw.write("\t"+str(ppm))
except ZeroDivisionError:
fw.write("\t")
fw.write("\n")
fw.write("\n")
############################
# Isotopic Pattern Quality #
############################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Isotopic Pattern Quality ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
totalExpInt = 0
qc = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(float(m.obsInt) - float(m.background),0)
totalExpInt += float(m.expInt)
for m in l.isotopes:
if int(m.charge) == i:
try:
maxIntensityBackCorrected = max(float(m.obsInt) - float(m.background),0)
qc += abs((maxIntensityBackCorrected / float(sumInt)) - (m.expInt/totalExpInt))
except ZeroDivisionError:
pass
except AttributeError:
pass
if qc > 0:
fw.write("\t"+str(qc))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#########################
# Signal to Noise ratio #
#########################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "S/N ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
expInt = 0
SN = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if m.expInt > expInt and int(m.charge) == i:
try:
SN = (m.obsMax - m.backgroundPoint) / m.noise
except ZeroDivisionError:
pass
expInt = m.expInt
except AttributeError:
pass
if SN > 0:
fw.write("\t"+str(SN))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#########################################
# Fraction of analytes above S/N cutoff #
#########################################
if self.spectraQualityControl.get() == 1:
ref = []
self.refParser(ref)
times = []
for i in ref:
times.append((i[4],i[5]))
chunks = collections.OrderedDict()
for i in times:
if i not in chunks.keys():
chunks['%s' % '-'.join(i)] = []
for i in ref:
chunks['%s' % '-'.join((i[4],i[5]))].append(i)
# Header
fw.write("Fraction of analytes above S/N Cutoff")
for index,i in enumerate(chunks.keys()):
fw.write("\t"+str(i))
fw.write("\n")
#for index,i in enumerate(chunks.keys()):
# Actual data
for j in total:
fw.write(str(j[0]))
for index,i in enumerate(chunks.keys()):
numberTotal = 0
numberPass = 0
for k in compositions:
expInt = 0
SN = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(i.split("-")[0]) and float(l.timeWindow) == float(i.split("-")[1]):
numberTotal += 1
for m in l.isotopes:
if m.expInt > expInt: # and int(m.charge) == i:
try:
SN = (m.obsMax - m.backgroundPoint) / m.noise
except ZeroDivisionError:
pass
expInt = m.expInt
if SN > S_N_CUTOFF:
numberPass += 1
except AttributeError:
pass
if numberTotal > 0:
fw.write("\t"+str(float(numberPass)/float(numberTotal)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
def writeResults(self,results,file):
""" This function writes the resultes per file away to a raw
file.
INPUT: A file name and a list of results
OUTPUT: A raw file per measurement
"""
outFile = os.path.split(file)[-1]
outFile = outFile.split(".")[0]
outFile = outFile+".raw"
outFile = os.path.join(self.batchFolder,outFile)
with open(outFile,'w') as fw:
fw.write(str(file)+"\n")
fw.write("Composition\tCharge\tIsotope\tExact Mass\tAccurate Mass\tTime\tTheor Area\tMass Window\tTime Window\tArea\tBackground Area\tBackground Point\tNoise\tMax Intensity\n")
for index,i in enumerate(results):
composition, charge, isotope = "_".join(i[4][0].split("_")[:-2]), i[4][0].split("_")[-2], i[4][0].split("_")[-1]
fw.write(str(composition)+"\t"+str(charge)+"\t"+str(isotope)+"\t"+str(i[4][1])+"\t"+str(i[2])+"\t"+str(i[4][4])+"\t"+str(i[4][2])+"\t"+str(i[4][3])+"\t"+str(i[4][5])+"\t"+str(i[0])+"\t"+str(i[1][1])+"\t"+str(i[1][0])+"\t"+str(i[1][2])+"\t"+str(i[3])+"\n")
def batchPopup(self,master):
""" This function creates a pop up box in which all the parameters
for a batch process can be set and visualized. The window can
access and set the masters alFile, refFile and batchFolder.
The window can also call the outputPopup function (to specify
the contents of final summary) and start the actual
batchProcess function.
INPUT: None
OUTPUT: None
"""
if master.batchWindow == 1:
return
master.batchWindow = 1
self.al = tk.StringVar()
self.ref = tk.StringVar()
self.folder = tk.StringVar()
if master.alFile:
self.al.set(master.alFile)
if master.refFile:
self.ref.set(master.refFile)
if master.batchFolder:
self.folder.set(master.batchFolder)
def alButton():
master.openAlFile()
self.al.set(master.alFile)
def refButton():
master.openRefFile()
self.ref.set(master.refFile)
def batchButton():
master.openBatchFolder()
self.folder.set(master.batchFolder)
def close(self):
master.batchWindow = 0
top.destroy()
def run():
master.batchWindow = 0
top.destroy()
master.batchProcess(master)
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.aligns = tk.Button(top, text = "Alignment File", width = 25, command = lambda: alButton())
self.aligns.grid(row = 2, column = 0, sticky = tk.W)
self.alLabel = tk.Label(top, textvariable = self.al, width = 25)
self.alLabel.grid(row = 2, column = 1)
self.calibrate = tk.Checkbutton(top, text = "Calibration", variable = master.calFile, onvalue = 1, offvalue = 0)
self.calibrate.grid(row = 3, column = 0, sticky = tk.W)
self.compos = tk.Button(top, text = "Reference File", width = 25, command = lambda: refButton())
self.compos.grid(row = 4, column = 0, sticky = tk.W)
self.com = tk.Label(top, textvariable = self.ref, width = 25)
self.com.grid(row = 4, column = 1)
self.batchDir = tk.Button(top, text = "Batch Directory", width = 25, command = lambda: batchButton())
self.batchDir.grid(row = 5, column = 0, sticky = tk.W)
self.batch = tk.Label(top, textvariable = self.folder, width = 25)
self.batch.grid(row = 5, column = 1)
self.output = tk.Button(top, text = "Output Format", width = 25, command = lambda: master.outputPopup(master))
self.output.grid(row = 6, column = 0,columnspan = 2)
self.run = tk.Button(top, text = "Run Batch Process", width = 25, command = lambda: run())
self.run.grid(row = 7, column = 0, columnspan = 2)
#top.lift()
# Couple the attributes to button presses
top.attributes("-topmost", True)
def outputPopup(self,master):
""" This function creates a pop up box to specify what output
should be shown in the final summary. The default value for all
variables is off (0) and by ticking a box it is set to on (1).
INPUT: None
OUTPUT: None
"""
if master.outputWindow == 1:
return
master.outputWindow = 1
def select_all(self):
master.analyteIntensity.set(1)
master.analyteRelIntensity.set(1)
master.analyteBackground.set(1)
master.analyteNoise.set(1)
master.analytePerCharge.set(1)
master.analyteBckSub.set(1)
master.normalizeCluster.set(1)
master.alignmentQC.set(1)
master.qualityControl.set(1)
master.spectraQualityControl.set(1)
def select_none(self):
master.analyteIntensity.set(0)
master.analyteRelIntensity.set(0)
master.analyteBackground.set(0)
master.analyteNoise.set(0)
master.analytePerCharge.set(0)
master.analyteBckSub.set(0)
master.normalizeCluster.set(0)
master.alignmentQC.set(0)
master.qualityControl.set(0)
master.spectraQualityControl.set(0)
def close(self):
master.outputWindow = 0
top.destroy()
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.all = tk.Button(top, text = "Select All", command = lambda: select_all(self))
self.all.grid(row = 0, column = 0, sticky = tk.W)
self.none = tk.Button(top, text = "Select None", command = lambda: select_none(self))
self.none.grid(row = 0, column = 1, sticky = tk.E)
self.text1 = tk.Label(top, text = "Base Outputs", font="bold")
self.text1.grid(row = 1, column = 0, sticky = tk.W)
self.text2 = tk.Label(top, text = "Output Modifiers", font="bold")
self.text2.grid(row = 1, column = 1, sticky = tk.W)
# Analyte Intensity (*,#)
self.ai = tk.Checkbutton(top, text = u"Analyte Intensity\u00B9\u00B7\u00B2", variable = master.analyteIntensity, onvalue = 1, offvalue = 0)
self.ai.grid(row = 2, column = 0, sticky = tk.W)
self.ri = tk.Checkbutton(top, text = u"Relative Intensity\u00B9\u00B7\u00B2\u00B7\u00B3", variable = master.analyteRelIntensity, onvalue = 1, offvalue = 0)
self.ri.grid(row = 3, column = 0, sticky = tk.W)
self.back = tk.Checkbutton(top, text = u"Analyte Background\u00B9", variable = master.analyteBackground, onvalue = 1, offvalue = 0)
self.back.grid(row = 4, column = 0, sticky = tk.W)
self.analNoise = tk.Checkbutton(top, text = u"Analyte Noise\u00B9", variable = master.analyteNoise, onvalue = 1, offvalue = 0)
self.analNoise.grid(row = 5, column = 0, sticky = tk.W)
self.chargeState = tk.Checkbutton(top, text = u"\u00B9Intensities per Charge State", variable = master.analytePerCharge, onvalue = 1, offvalue = 0)
self.chargeState.grid(row = 2, column = 1, sticky = tk.W)
self.bckSub = tk.Checkbutton(top, text = u"\u00B2Background subtracted Intensities", variable = master.analyteBckSub, onvalue = 1, offvalue = 0)
self.bckSub.grid(row = 3, column = 1, sticky = tk.W)
self.norClus = tk.Checkbutton(top, text = u"\u00B3Normalization per cluster", variable = master.normalizeCluster, onvalue = 1, offvalue = 0)
self.norClus.grid(row = 4, column = 1, sticky = tk.W)
self.align = tk.Checkbutton(top, text="Alignment QC", variable=master.alignmentQC, onvalue=1, offvalue=0)
self.align.grid(row = 6, column=0, sticky=tk.W)
self.qc = tk.Checkbutton(top, text = "Analyte QC", variable = master.qualityControl, onvalue = 1, offvalue = 0)
self.qc.grid(row = 7, column = 0, sticky = tk.W)
self.specQC = tk.Checkbutton(top, text="Spectral QC", variable = master.spectraQualityControl, onvalue=1, offvalue=0)
self.specQC.grid(row = 8, column = 0, sticky = tk.W)
self.button = tk.Button(top,text='Ok',command = lambda: close(self))
self.button.grid(row = 9, column = 0, columnspan = 2)
top.lift()
def binarySearch(self, array, target, high, direction):
"""Returns element number directly to the left in array 'array'
of specified element 'target', assuming 'array[x][0]' is sorted,
if direction is set as 'left'.
The return value a is such that all elements in array[:a] have
element < target, and all e in array[a:] have element >= target.
Returns element number directly to the right in array 'array'
of specified element 'target', assuming 'array[x][0]' is sorted,
if direction is set as 'right'.
The return value a is such that all elements in array[:a] have
element <= target, and all e in array[a:] have element > target.
"""
if target >= array[0][0] and target <= array[high][0]:
a = 0
b = high
while a < b:
mid=(a+b)//2
if direction == 'left':
if array[mid][0] < target:
a=mid+1
else:
b=mid
if direction == 'right':
if array[mid][0] > target:
b = mid
else:
a = mid+1
return a
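# A minimal sketch of the same left/right semantics using the standard
# library's bisect module, on a hypothetical list of (m/z, intensity)
# tuples sorted by m/z (illustrative values only):
#
#   import bisect
#   spectrum = [(100.0, 5), (100.5, 7), (101.0, 3), (101.5, 9)]
#   mzs = [p[0] for p in spectrum]
#   low = bisect.bisect_left(mzs, 100.4)    # 1 -> every point in spectrum[:low] has m/z < 100.4
#   high = bisect.bisect_right(mzs, 101.0)  # 3 -> every point in spectrum[:high] has m/z <= 101.0
#   window = spectrum[low:high]             # [(100.5, 7), (101.0, 3)]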
def openFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then read (by the readData
function) and the read data is used to plot the selected spectrum
on the screen (by the plotData function).
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'inputFile',file_path)
def openCalFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.calFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'calFile',file_path)
def openBatchFolder(self):
""" This function opens a Tkinter filedialog, asking the user
to select a directory. The chosen directory is then set to the
self.batchFolder variable.
INPUT: None
OUTPUT: None
"""
folder_path = filedialog.askdirectory()
if not folder_path:
pass
else:
setattr(self,'batchFolder',folder_path)
def openRefFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.refFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'refFile',file_path)
def openAlFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.alFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'alFile',file_path)
def processBlock(self, block, array, readTimes):
""" This function processes a data block as taken from the input
file.
INPUT: A data block from the mzXML file
OUTPUT: None
"""
#if "scan num" in block:
# scan = block.split("scan num")[1]
# scan = scan.split("\"")[1]
if "retentionTime" in block:
rt = block.split("retentionTime")[1]
rt = rt.split("\"")[1]
if rt[0] == 'P':
rt = rt[2:-1]
#if "peaksCount" in block:
# peaks = block.split("peaksCount")[1]
# FIX not to catch zlib in encoded data
if '"zlib"' in block:
compression = True
# FIX for implicit no compression
else:
compression = False
if "byteOrder" in block:
byteOrder = block.split("byteOrder")[1]
byteOrder = byteOrder.split("\"")[1]
if "precision" in block:
precision = block.split("precision")[1]
precision = precision.split("\"")[1]
# FIX pairOrder is Bruker format bending
if "contentType" in block or "pairOrder" in block:
peaks = block.split('"m/z-int">')[1]
peaks = peaks.split("</peaks>")[0]
if peaks:
if readTimes:
flag = 0
for i in readTimes:
if float(rt) >= i[0] and float(rt) <= i[1]:
self.mzXMLDecoder(rt, peaks, precision, compression, byteOrder, array)
flag = 1
if flag == 0:
array.append((float(rt),None))
else:
self.mzXMLDecoder(rt, peaks, precision, compression, byteOrder, array)
######################################################
# START OF FUNCTIONS RELATED TO PARSING ANALYTE FILE #
######################################################
#def getChanceNetwork(self,(mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36)):
def getChanceNetwork(self, foo):
""" This function calculates the total chance network based on
all the individual distributions. The function multiplies all
the chances to get a single chance for a single option.
INPUT: A list containing the Analyte m/z followed by several
other lists (1 for each isotopic state).
OUTPUT: A list of float tuples (isotopic m/z, isotopic chance)
"""
mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36 = foo
totals = []
for x in itertools.product(carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36):
i, j, k, l, m, n, o, p = x
totals.append((mass+i[0]+j[0]+k[0]+l[0]+m[0]+n[0]+o[0]+p[0],
i[1]*j[1]*k[1]*l[1]*m[1]*n[1]*o[1]*p[1]))
return totals
def mergeChances(self,totals):
""" This function merges all the isotopic chances based on the
specified resolution of the machine.
INPUT: A list of float tuples (isotopic m/z, isotopic chance)
OUTPUT: A sorted list of float tuples (isotopic m/z, isotopic
chance).
"""
results = []
newdata = {d: True for d in totals}
for k, v in totals:
if not newdata[(k,v)]: continue
newdata[(k,v)] = False
# use each piece of data only once
keys,values = [k*v],[v]
for kk, vv in [d for d in totals if newdata[d]]:
if abs(k-kk) < EPSILON:
keys.append(kk*vv)
values.append(vv)
newdata[(kk,vv)] = False
results.append((sum(keys)/sum(values),sum(values)))
return results
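# A small worked example of the merge rule above, assuming EPSILON is large
# enough to group the first two (hypothetical) isotopic options:
#
#   totals = [(1000.0000, 0.60), (1000.0002, 0.30), (1001.0030, 0.10)]
#   merged m/z    = (1000.0000*0.60 + 1000.0002*0.30) / (0.60 + 0.30) ~ 1000.00007
#   merged chance = 0.60 + 0.30 = 0.90
#   result        = [(1000.00007, 0.90), (1001.0030, 0.10)]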
def calcDistribution(self, element, number):
""" This function calculates the fraction of the total intensity
that is present in each isotope of the given element based on
a binomial distribution. The function takes the name of the
element and the number of atoms of said element as an input and
returns a list of (m/z,fraction) tuples. The number of isotopes
that is returned is dependent on the distribution; once fractions
fall below 0.001 the function stops.
INPUT1: A string containing the code for the element (ie 33S)
INPUT2: An integer listing the number of atoms
OUTPUT: A list of float tuples (isotope m/z, isotope fraction).
"""
fractions = []
for i in element:
lastFraction = 0.
j = 0
while j <= number:
nCk = math.factorial(number) / (math.factorial(j) * math.factorial(number - j))
f = nCk * i[1]**j * (1 - i[1])**(number-j)
fractions.append((i[2]*j,f))
j+= 1
if f < 0.001 and f < lastFraction:
break
lastFraction = f
return fractions
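# The binomial rule used above, written out for a hypothetical pool of
# 10 carbon atoms with a 13C abundance of roughly 0.0107:
#
#   P(j heavy atoms) = C(10, j) * 0.0107**j * (1 - 0.0107)**(10 - j)
#   j = 0 -> ~0.898   (monoisotopic)
#   j = 1 -> ~0.097
#   j = 2 -> ~0.005   (already close to the 0.001 stop criterion)
#
# Each fraction is stored together with its m/z shift, j times the mass
# difference of the heavy isotope.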
def parseAnalyte(self,Analyte):
""" This function splits the Analyte input string into a parts
and calculates the total number of each element of interest per
Analyte. The function will then attach further elements based on
the user specified mass modifiers before calling the isotopic
distribution function. The function finally returns a list
containing the analyte mass and distribution lists for each
isotopic state.
INPUT: A string containing the Analyte (ie 'H4N4')
OUTPUT: A list containing the Analyte m/z followed by several
other lists (1 for each isotopic state).
"""
results = []
mass = 0
numCarbons = 0
numHydrogens = 0
numNitrogens = 0
numOxygens = 0
numSulfurs = 0
totalElements = 0
units = ["".join(x) for _,x in itertools.groupby(Analyte,key=str.isdigit)]
# Calculate the base composition values
for index,j in enumerate(units):
present_flag = False
for k in UNITS:
if j == k:
try:
int(units[index+1])
except:
messagebox.showinfo(
"Error Message","There is no number "+
"specified for building block "+
str(j))
sys.exit()
mass += float(BLOCKS[k]['mass']) * float(units[index+1])
numCarbons += int(BLOCKS[k]['carbons']) * int(units[index+1])
numHydrogens += int(BLOCKS[k]['hydrogens']) * int(units[index+1])
numNitrogens += int(BLOCKS[k]['nitrogens']) * int(units[index+1])
numOxygens += int(BLOCKS[k]['oxygens']) * int(units[index+1])
numSulfurs += int(BLOCKS[k]['sulfurs']) * int(units[index+1])
present_flag = True
if present_flag == False and j.isalpha():
messagebox.showinfo(
"Error Message","The specified building block of "+
str(j)+" is unknown. Did you create the building "+
"block in the LaCyTools blocks directory?")
sys.exit()
# Attach the mass modifier values
for j in MASS_MODIFIERS:
mass += float(BLOCKS[j]['mass'])
numCarbons += int(BLOCKS[j]['carbons'])  # keep integer so math.factorial in calcDistribution accepts it
numHydrogens += int(BLOCKS[j]['hydrogens'])
numNitrogens += int(BLOCKS[j]['nitrogens'])
numOxygens += int(BLOCKS[j]['oxygens'])
numSulfurs += int(BLOCKS[j]['sulfurs'])
# Calculate the distribution for the given value
carbons = self.calcDistribution(C,numCarbons)
hydrogens = self.calcDistribution(H,numHydrogens)
nitrogens = self.calcDistribution(N,numNitrogens)
oxygens17 = self.calcDistribution(O17,numOxygens)
oxygens18 = self.calcDistribution(O18,numOxygens)
sulfurs33 = self.calcDistribution(S33,numSulfurs)
sulfurs34 = self.calcDistribution(S34,numSulfurs)
sulfurs36 = self.calcDistribution(S36,numSulfurs)
return ((mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36))
def initCompositionMasses(self, file):
""" This function reads the composition file. Calculates the
masses for the compositions read from the composition file.
The function then calculates the mass and fraction of total
ions that should be theoretically present in the spectrum. The final output
is a modified reference list containing each analyte's structure
and window followed by a list of isotope m/z and isotopic
fraction.
INPUT: A string containing the path of the composition file
OUTPUT: None
"""
lines = []
with open(file,'r') as fr:
for line in fr:
line = line.rstrip()
lines.append(line)
# Chop composition into sub units and get exact mass & carbon count
analyteFile = os.path.join(self.batchFolder,"analytes.ref")
if OVERWRITE_ANALYTES == False:
print ("USING EXISTING REFERENCE FILE")
return
elif OVERWRITE_ANALYTES == True:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tPRE-PROCESSING REFERENCE FILE\n")
with open(analyteFile,'w') as fw:
fw.write("# Peak\tm/z\tRel Area\twindow\trt\ttime window\tCalibration\n")
for i in lines:
try:
if i[0] == "#":
continue
except IndexError:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tIncorrect line observed in: "+str(analyteFile)+"\n")
except:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tUnexpected Error: "+str(sys.exc_info()[0])+"\n")
i = i.split("\t")
# Initialize variables
massWindow = MASS_WINDOW
timeWindow = TIME_WINDOW
minCharge = MIN_CHARGE
maxCharge = MAX_CHARGE
calibration = False
# Check optional variables
if len(i) >= 2:
time = float(i[1])
if len(i) > 2:
if i[2]:
massWindow = float(i[2])
if len(i) > 3:
if i[3]:
timeWindow = int(i[3])
if len(i) > 4:
if i[4]:
minCharge = int(i[4])
if len(i) > 5:
if i[5]:
maxCharge = int(i[5])
if len(i) > 6:
if i[6]:
calibration = True
# End of variable check
values = self.parseAnalyte(i[0])
totals = self.getChanceNetwork(values)
results = self.mergeChances(totals)
results = self.selectIsotopes(results)
# Write analyte file
for j in range(minCharge,maxCharge+1):
# Adding the padding windows to determine IPQ
for k in range(-EXTRACTION_PADDING, 0):
fw.write(str(i[0])+"_"+str(j)+"_"+str(k)+"\t"+str((results[0][0]+k*BLOCKS[CHARGE_CARRIER[0]]['mass']+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(0)+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tFalse\n")
maxIsotope = max(results,key=lambda tup:tup[1])[1]
for k in results:
if calibration == True and k[1] == maxIsotope:
fw.write(str(i[0])+"_"+str(j)+"_"+str(k[2])+"\t"+str((k[0]+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(k[1])+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tTrue\n")
else:
fw.write(str(i[0])+"_"+str(j)+"_"+str(k[2])+"\t"+str((k[0]+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(k[1])+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tFalse\n")
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tPRE-PROCESSING COMPLETE\n")
else:
print ("Incorrect value for the OVERWRITE_ANALYTES parameter")
####################################################
# END OF FUNCTIONS RELATED TO PARSING ANALYTE FILE #
####################################################
def extractData(self,ref,array,results):
""" This is the controller function for quantitation of data.
INPUT: A ref file and input file
OUTPUT: A list of results consisting of (area, 'background',
accurate mass, maximum intensity point and the current
analyte)
"""
if self.refFile == "":
messagebox.showinfo("Error Message","No reference file selected")
return
if self.inputFile == "":
messagebox.showinfo("Error Message","No input file selected")
return
analyteBuffer = ""
for i in ref:
intensity = 0
x_points = []
y_points = []
maximum = (0,0)
maxInt = 0
# Not pretty but it fixes the charge not being taken into account
# with extraction and background determination
charge = int(i[0].split("_")[-2])
if '#' in i[0]:
pass
else:
lowMz = self.binarySearch(array,float(i[1])-float(i[3]),len(array)-1,'left')
highMz = self.binarySearch(array,float(i[1])+float(i[3]),len(array)-1,'right')
if "_".join(i[0].split("_")[:-1]) == analyteBuffer:
pass
else:
background = self.getBackground(array, float(i[1]), charge, float(i[3]))
analyteBuffer = "_".join(i[0].split("_")[:-1])
if lowMz and highMz:
try:
range(lowMz,highMz)
except TypeError:
print ("\nReference: "+str(i[0])+" has incorrect m/z parameters")
input("Press ENTER to exit")
sys.exit()
for k in range(lowMz, highMz):
# Get maximum point for S/N calculation
if int(array[k][1]) > maxInt:
maxInt = int(array[k][1])
if EXTRACTION_TYPE == 1:
if int(array[k][1]) > intensity:
intensity = int(array[k][1])
elif EXTRACTION_TYPE == 0:
intensity += int(array[k][1])
elif EXTRACTION_TYPE == 2:
intensity += array[k][1] * ((array[highMz][0] - array[lowMz][0]) / (highMz - lowMz))
# We need these to get the local maxima
x_points.append(array[k][0])
y_points.append(array[k][1])
######################################################################
# Only spend time on doing this if we actually wanted the PPM Errors #
# This is not being used yet, but should! #
######################################################################
if self.qualityControl.get() == 1:
try:
newX = numpy.linspace(x_points[0], x_points[-1], int(2500*(x_points[-1]-x_points[0])))
f = InterpolatedUnivariateSpline(x_points,y_points)
ySPLINE = f(newX)
for index, j in enumerate(ySPLINE):
if j > maximum[1]:
maximum = (newX[index],j)
except ValueError:
data = zip(x_points,y_points)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tGaussian curve fit failed for analyte: "+str(i[0])+", reverting to non-fitted local maximum\n")
for j in data:
if j[1] > maximum[1]:
maximum = (j[0],j[1])
except:
print ("Analyte: "+str(i[0])+" is being troublesome, kill it")
else:
intensity = 0
background = (0,0,0)
maximum = (0,0)
# Check if maxima above S/N cut-off
results.append((intensity,background,maximum[0],maxInt,i))
if self.batchProcessing == 1:
return results
else:
for i in results:
print (i)
def getBackground(self, array, target, charge, width):
""" This functin will determine the background and noise for a
given analyte.
INPUT: The spectrum in array form, the exact m/z of the analyte,
the charge of the analyte and the m/z window
OUTPUT: A list of (the average background, the background area
and the noise)
"""
backgroundPoint = float('inf') # Start value guaranteed to be above any observed intensity
totals = []
for i in numpy.arange(-BACKGROUND_WINDOW,BACKGROUND_WINDOW,1.0/charge):
windowAreas = []
windowIntensities = []
windowMz = []
begin = self.binarySearch(array,(float(target)-i*C[0][2])-float(width),len(array)-1,'left')
end = self.binarySearch(array,(float(target)-i*C[0][2])+float(width),len(array)-1,'right')
if begin == None or end == None:
print ("Specified m/z value of " +str((float(target)-i*C[0][2])-float(width)) + " or " + str((float(target)-i*C[0][2])+float(width))+ " outside of spectra range")
input("Press enter to exit")
sys.exit()
for j in array[begin:end]:
windowAreas.append(j[1] * ((array[end][0] - array[begin][0]) / (end - begin)))
windowIntensities.append(j[1])
windowMz.append(j[0])
totals.append((windowAreas,windowIntensities,windowMz))
# Find the set of 5 consecutive windows with lowest average intensity
if self.background == "MIN":
for i in range(0,(2*BACKGROUND_WINDOW)-4):
mix = totals[i][1]+totals[i+1][1]+totals[i+2][1]+totals[i+3][1]+totals[i+4][1]
avgBackground = numpy.average([sum(totals[i][0]),sum(totals[i+1][0]),sum(totals[i+2][0]),sum(totals[i+3][0]),sum(totals[i+4][0])])
dev = numpy.std(mix)
avg = numpy.average(mix)
if avg < backgroundPoint:
backgroundPoint = avg
backgroundArea = avgBackground
if self.noise == "RMS":
noise = dev
elif self.noise == "MM":
noise = max(mix) - min(mix)
# Find the set of 5 consecutive windows with median average intensity
elif self.background == "MEDIAN":
values = []
for i in range(0, (2*BACKGROUND_WINDOW)-4):
mix = totals[i][1]+totals[i+1][1]+totals[i+2][1]+totals[i+3][1]+totals[i+4][1]
avgBackground = numpy.average([sum(totals[i][0]), sum(totals[i+1][0]), sum(totals[i+2][0]), sum(totals[i+3][0]), sum(totals[i+4][0])])
dev = numpy.std(mix)
avg = numpy.average(mix)
if self.noise == "RMS":
noise = dev
elif self.noise == "MM":
noise = max(mix) - min(mix)
values.append((avg, avgBackground, noise))
sortedValues = sorted(values, key=lambda x: x[0])
a, b, c = zip(*sortedValues)
backgroundPoint = a[len(a)//2]
backgroundArea = b[len(b)//2]
noise = c[len(c)//2]
# NOBAN METHOD
elif self.background == "NOBAN":
dataPoints = []
for i in range(0, (2*BACKGROUND_WINDOW)):
dataPoints.extend(totals[i][1])
sortedData = sorted(dataPoints)
startSize = int(0.25 * float(len(sortedData)))
currSize = startSize
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
directionFlag = 0
for k in range(0,len(sortedData)-(startSize+1)):
if sortedData[currSize+1] < currAverage + 3 * currNoise:
directionFlag = 1
currSize += 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
if sortedData[currSize-1] > currAverage + 3 * currNoise and directionFlag == 0:
currSize -= 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
break
# Get Area
# Get length and spacing of window
windowLength = 0
for i in range(0, (2*BACKGROUND_WINDOW)):
if len(totals[i][1]) > windowLength:
windowLength = len(totals[i][1])
spacing = (max(totals[i][2])-min(totals[i][2])) / windowLength
currArea = windowLength * (currAverage * spacing)
# Assign values to generic names
backgroundPoint = currAverage
backgroundArea = currArea
noise = currNoise
return (backgroundPoint,backgroundArea,noise)
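# The "MIN" branch above boils down to a sliding-window minimum; a compact
# sketch of that idea on a plain list of per-window mean intensities
# (hypothetical numbers, ignoring the area and noise bookkeeping):
#
#   means = [12.0, 9.5, 8.0, 7.5, 8.2, 15.0, 30.0, 28.0]
#   best = min(range(len(means) - 4),
#              key=lambda i: sum(means[i:i + 5]) / 5.0)
#   background = sum(means[best:best + 5]) / 5.0   # lowest 5-window average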
def matchFeatureTimes(self, features):
""" This function takes a list of features/times and combines
them into a single list, useful for reading only relevant
scans later in the program.
INPUT: A list of (m/z,rt) tuples
OUTPUT: A list of (rt,rt) tuples
"""
wanted = []
features = sorted(features, key=lambda x:x[1])
current = (float(features[0][1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW, float(features[0][1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
for i in features:
if float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW >= current[0] and float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW < current[1]:
if float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW > current[1]:
current = (current[0],float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
else:
wanted.append(current)
current = (float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW, float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
wanted.append(current)
return wanted
def matchAnalyteTimes(self, ref):
""" This function takes a list of references and creates a list
of time tuples, that is needed to read only relevant scans later
in the program.
INPUT: A list of references (name, mz, int, window and so forth)
OUTPUT: A list of (rt,rt) tuples
"""
wanted = []
ref = sorted(ref,key=lambda x:x[4])
for i in ref:
if (float(i[4])-float(i[5]), float(i[4])+float(i[5])) not in wanted:
wanted.append((float(i[4])-float(i[5]), float(i[4])+float(i[5])))
return list(self.merge_ranges(wanted))
def merge_ranges(self,ranges):
"""
Merge overlapping and adjacent ranges and yield the merged ranges
in order. The argument must be an iterable of pairs (start, stop).
>>> list(merge_ranges([(5,7), (3,5), (-1,3)]))
[(-1, 7)]
>>> list(merge_ranges([(5,6), (3,4), (1,2)]))
[(1, 2), (3, 4), (5, 6)]
>>> list(merge_ranges([]))
[]
Source = http://stackoverflow.com/questions/24130745/convert-generator-object-to-list-for-debugging
"""
ranges = iter(sorted(ranges))
current_start, current_stop = next(ranges)
for start, stop in ranges:
if start > current_stop:
# Gap between segments: output current segment and start a new one.
yield current_start, current_stop
current_start, current_stop = start, stop
else:
# Segments adjacent or overlapping: merge.
current_stop = max(current_stop, stop)
yield current_start, current_stop
def readData(self, array, readTimes):
""" This function reads mzXML files and has the scans decoded on
a per scan basis. The scans are identified by getting the line
number of the beginning and ending tag for a scan.
INPUT: file handle
OUTPUT: TBA
"""
header = True
started = False
block = ""
with open(self.inputFile,'r') as fr:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tProcessing "+str(self.inputFile)+"\n")
for number, line in enumerate(fr):
if '</dataProcessing>' in line:
header = False
if '<scan num="' in line and header == False:
started = True
if started == True:
block +=line
if '</scan>' in line and header == False and started == True:
self.processBlock(block, array, readTimes)
started = False
block = ""
#print "Finished processing "+str(self.inputFile)
def readPTData(self, array, readTimes):
""" TODO by Genadij Razdorov
"""
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tProcessing "+str(self.inputFile)+"\n")
i = self.inputFileIdx
for row in self.ptFile.root.scans.where("sample == i"):
rt, art, idx, size = row[2:]
if art != 0:
rt = art
if readTimes:
for s, e in readTimes:
if rt >= s and rt <= e:
break
else:
array.append((rt, None))
continue
mzs = self.ptFile.root.mzs[idx][:size]
Is = self.ptFile.root.Is[idx][:size]
array.append((rt, numpy.vstack((numpy.cumsum(mzs), Is)).T))
def refParser(self, ref):
"""Reads the reference file and fills the list 'ref' with names
and parameters inherent to the chosen analytes to be integrated.
INPUT: An empty ref list
OUTPUT: A filled ref list
"""
with open(os.path.join(self.batchFolder,"analytes.ref"),'r') as fr:
for line in fr:
if line[0] == "#":
continue
parts=line.rstrip('\n').split('\t')
ref.append(parts)
def mzXMLDecoder(self, rt, peaks, precision, compression, byteOrder, array):
""" This function parses the encoded string from an mzXML file.
The decoded data is finally added to the data array containing
the entire measurement.
INPUT: An encoded string and the data array containing all of
the measurement that has been processed up to this point
OUTPUT: A data array containing all of the measurement that has
been processed up to this point
"""
endian = ">"
if byteOrder == 'little':
endian = '<'
elif byteOrder == 'big':
endian = '>'
# get precision
if int(precision) == 64:
precision = '8'
else:
precision = '4'
# decode data
data = base64.b64decode(peaks)
# decompression
if compression == True:
data = zlib.decompress(data)
data = numpy.frombuffer(data, dtype=endian + 'f' + precision)
# format
new = numpy.vstack((data[::2], data[1::2])).T
# list notation
array.append((float(rt),new))
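# Round-trip sketch of one encoding handled above (zlib-compressed, base64
# encoded, big-endian 64-bit floats as interleaved m/z / intensity pairs):
#
#   import base64, zlib
#   import numpy
#   pairs = numpy.array([100.0, 5.0, 100.5, 7.0])          # m/z, int, m/z, int, ...
#   blob = base64.b64encode(zlib.compress(pairs.astype('>f8').tobytes()))
#   data = numpy.frombuffer(zlib.decompress(base64.b64decode(blob)), dtype='>f8')
#   spectrum = numpy.vstack((data[::2], data[1::2])).T      # [[100.0, 5.0], [100.5, 7.0]]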
# Call the main app
root = tk.Tk()
app = App(root)
root.mainloop()
| apache-2.0 |
aminert/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
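# Background for the tolerances above: RBFSampler implements the random
# Fourier feature construction of Rahimi & Recht, drawing w ~ N(0, 2*gamma*I)
# and b ~ U(0, 2*pi) and mapping
#   z(x) = sqrt(2 / n_components) * cos(w.T x + b),
# so that E[z(x).z(y)] = exp(-gamma * ||x - y||^2), the RBF kernel; this is
# why the mean error is expected to be close to zero (unbiased estimate).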
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
ahmadia/bokeh | bokeh/charts/builder/timeseries_builder.py | 17 | 6098 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class, which lets you build your TimeSeries charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
# necessary to make all formats and encoders happy with array, blaze, ...
xs = list([x for x in self._values_index])
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
# save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
| bsd-3-clause |
Myasuka/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares loss function. The penalty `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
0asa/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
hristo-vrigazov/behavioral-cloning | model.py | 1 | 2572 | # See models/nvidia_pipeline.py for the model architecture
import sys
import cv2
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from models.nvidia_pipeline import NvidiaPipeLine
from models.vgg_pipeline import VGGPipeline
from models.small_image_pipeline import SmallImagePipeline
from models.comma_ai_pipeline import CommaAiPipeline
from PIL import Image
from utils import get_driving_log_dataframe
from utils import get_callbacks
from keras.models import load_model
# Instead of implementing here, the model is in the
# models/ directory, because this allows to quickly
# switch between different pipelines
pipeline = NvidiaPipeLine()
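# The pipeline object is only assumed to expose the methods used below:
# get_model(), preprocess_image(), get_train_samples(), get_train_generator(),
# get_validation_samples() and get_validation_generator(); swapping in
# VGGPipeline, SmallImagePipeline or CommaAiPipeline should work as long as
# they implement the same interface.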
BATCH_SIZE = 32
EPOCHS = 2
# this function is also used in drive.py,
# this way when switching the pipeline the
# preprocessing for driving is also changed
# appropriately
def preprocess(image):
return pipeline.preprocess_image(image)
def train(data_folder, validation_folder, restart_model_path=None):
if restart_model_path:
model = load_model(restart_model_path)
print("Using existing model")
else:
model = pipeline.get_model()
model.compile("adam", "mse")
print("Using new model")
samples = pipeline.get_train_samples(get_driving_log_dataframe(data_folder))
train_generator = pipeline.get_train_generator(data_folder, batch_size=BATCH_SIZE)
model.summary()
image_generator = train_generator
validation_generator = pipeline.get_validation_generator(validation_folder, batch_size=BATCH_SIZE)
nb_val_samples = pipeline.get_validation_samples(get_driving_log_dataframe(validation_folder))
# callbacks that save weights after each epoch
callbacks_list = get_callbacks()
model.fit_generator(image_generator,
samples_per_epoch=samples,
nb_epoch=EPOCHS,
callbacks=callbacks_list,
validation_data=validation_generator,
nb_val_samples=nb_val_samples)
# save everything for possible finetuning in the future
model.save('model-compiled.h5')
json_string = model.to_json()
with open('model.json', 'w') as model_json_file:
model_json_file.write(json_string)
model.save_weights('model.h5')
if __name__ == "__main__":
if len(sys.argv) < 3:
print('Usage: python model.py train_folder valid_folder [exising_model_to_finetune]')
elif len(sys.argv) < 4:
train(sys.argv[1], sys.argv[2])
else:
train(sys.argv[1], sys.argv[2], sys.argv[3])
| mit |
kdebrab/pandas | pandas/plotting/_tools.py | 5 | 12814 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
from math import ceil
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCDataFrame
from pandas.compat import range
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
`kwargs`: keywords, optional
keyword arguments which passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, **kwargs)
return table
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of {nrows}x{ncols} must be larger '
'than required size {nplots}'.format(
nrows=nrows, ncols=ncols, nplots=nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
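# A few illustrative resolutions of the rules above (values assumed, not
# taken from the test suite):
#
#   _get_layout(5)                            # -> (3, 2), via the k*(k-1) search
#   _get_layout(5, layout=(-1, 3))            # -> (2, 3), -1 means "derive this dimension"
#   _get_layout(3, layout_type='horizontal')  # -> (1, 3)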
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box',
**fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Excess axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
          array of Axis objects.
        - NxM subplots with N>1 and M>1 are returned as a 2-d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
    layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is "
"ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified "
"when creating axes", UserWarning,
stacklevel=4)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the "
"same as the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing "
"the passed axes is being cleared", UserWarning,
stacklevel=4)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
try:
# set_visible will not be effective if
# minor axis has NullLocator and NullFormattor (default)
import matplotlib.ticker as ticker
if isinstance(axis.get_minor_locator(), ticker.NullLocator):
axis.set_minor_locator(ticker.AutoLocator())
if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
axis.set_minor_formatter(ticker.FormatStrFormatter(''))
for t in axis.get_minorticklabels():
t.set_visible(False)
except Exception: # pragma no cover
raise
axis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if nrows > 1:
try:
# first find out the ax layout,
                # so that we can correctly handle "gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
layout[ax.rowNum, ax.colNum] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
if not layout[ax.rowNum + 1, ax.colNum]:
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
                # if gridspec is used, ax.rowNum and ax.colNum may differ
                # from the layout shape. In this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all other to
# off as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
def _flatten(axes):
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, ABCIndexClass)):
return axes.ravel()
return np.array(axes)
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, 'right_ax'):
lines += ax.right_ax.get_lines()
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
| bsd-3-clause |
astropy/astropy | astropy/visualization/wcsaxes/tests/test_display_world_coordinates.py | 8 | 6507 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
import pytest
from matplotlib.backend_bases import KeyEvent
import numpy as np
import astropy.units as u
from astropy.coordinates import FK5, SkyCoord
from astropy.io import fits
from astropy.time import Time
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
from astropy.coordinates import galactocentric_frame_defaults
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
def teardown_method(self, method):
plt.close('all')
def test_overlay_coords(self, ignore_matplotlibrc, tmpdir):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test1.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event3.key, guiEvent=event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'
overlay = ax.get_coords_overlay('fk5')
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test2.png').strpath)
event4 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == '267.176\xb0 -28\xb045\'56" (world, overlay 1)'
overlay = ax.get_coords_overlay(FK5())
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test3.png').strpath)
event5 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == '267.176\xb0 -28\xb045\'56" (world, overlay 2)'
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test4.png').strpath)
event6 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event5.key, guiEvent=event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == '267.652\xb0 -28\xb046\'23" (world, overlay 3)'
def test_cube_coords(self, ignore_matplotlibrc, tmpdir):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '3h26m52.0s 30\xb037\'17\" 2563 (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_cube_coords_uncorr_slicing(self, ignore_matplotlibrc, tmpdir):
# Regression test for a bug that occurred with coordinate formatting if
# some dimensions were uncorrelated and sliced out.
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('x', 'y', 2))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '3h26m56.6s 30\xb018\'19\" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_plot_coord_3d_transform(self):
wcs = WCS(self.msx_header)
with galactocentric_frame_defaults.set('latest'):
coord = SkyCoord(0 * u.kpc, 0 * u.kpc, 0 * u.kpc, frame='galactocentric')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wcs)
point, = ax.plot_coord(coord, 'ro')
np.testing.assert_allclose(point.get_xydata()[0], [0, 0], atol=1e-4)
| bsd-3-clause |
Agent007/deepchem | contrib/dragonn/models.py | 6 | 16267 | from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from dragonn.metrics import ClassificationResult
from keras.layers.core import (Activation, Dense, Dropout, Flatten, Permute,
Reshape, TimeDistributedDense)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
from keras.layers.core import (Activation, Dense, Flatten,
TimeDistributedDense)
from keras.layers.recurrent import GRU
from keras.callbacks import EarlyStopping
from keras.models import Sequential
#class SequenceDNN(Model):
# """
# Sequence DNN models.
#
# Parameters
# ----------
# seq_length : int, optional
# length of input sequence.
# keras_model : instance of keras.models.Sequential, optional
# seq_length or keras_model must be specified.
# num_tasks : int, optional
# number of tasks. Default: 1.
# num_filters : list[int] | tuple[int]
# number of convolutional filters in each layer. Default: (15,).
# conv_width : list[int] | tuple[int]
# width of each layer's convolutional filters. Default: (15,).
# pool_width : int
# width of max pooling after the last layer. Default: 35.
# L1 : float
# strength of L1 penalty.
# dropout : float
# dropout probability in every convolutional layer. Default: 0.
# verbose: int
#    Verbosity level during training. Valid values: 0, 1, 2.
#
# Returns
# -------
# Compiled DNN model.
# """
#
# def __init__(self,
# seq_length=None,
# keras_model=None,
# use_RNN=False,
# num_tasks=1,
# num_filters=(15, 15, 15),
# conv_width=(15, 15, 15),
# pool_width=35,
# GRU_size=35,
# TDD_size=15,
# L1=0,
# dropout=0.0,
# num_epochs=100,
# verbose=1):
# self.num_tasks = num_tasks
# self.num_epochs = num_epochs
# self.verbose = verbose
# self.train_metrics = []
# self.valid_metrics = []
# if keras_model is not None and seq_length is None:
# self.model = keras_model
# self.num_tasks = keras_model.layers[-1].output_shape[-1]
# elif seq_length is not None and keras_model is None:
# self.model = Sequential()
# assert len(num_filters) == len(conv_width)
# for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
# conv_height = 4 if i == 0 else 1
# self.model.add(
# Convolution2D(
# nb_filter=nb_filter,
# nb_row=conv_height,
# nb_col=nb_col,
# activation='linear',
# init='he_normal',
# input_shape=(1, 4, seq_length),
# W_regularizer=l1(L1),
# b_regularizer=l1(L1)))
# self.model.add(Activation('relu'))
# self.model.add(Dropout(dropout))
# self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
# if use_RNN:
# num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
# self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
# self.model.add(Permute((2, 1)))
# self.model.add(GRU(GRU_size, return_sequences=True))
# self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
# self.model.add(Flatten())
# self.model.add(Dense(output_dim=self.num_tasks))
# self.model.add(Activation('sigmoid'))
# self.model.compile(optimizer='adam', loss='binary_crossentropy')
# else:
# raise ValueError(
# "Exactly one of seq_length or keras_model must be specified!")
#
# def train(self,
# X,
# y,
# validation_data,
# early_stopping_metric='Loss',
# early_stopping_patience=5,
# save_best_model_to_prefix=None):
# if y.dtype != bool:
# assert set(np.unique(y)) == {0, 1}
# y = y.astype(bool)
# multitask = y.shape[1] > 1
# if not multitask:
# num_positives = y.sum()
# num_sequences = len(y)
# num_negatives = num_sequences - num_positives
# if self.verbose >= 1:
# print('Training model (* indicates new best result)...')
# X_valid, y_valid = validation_data
# early_stopping_wait = 0
# best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
# for epoch in range(1, self.num_epochs + 1):
# self.model.fit(
# X,
# y,
# batch_size=128,
# nb_epoch=1,
# class_weight={
# True: num_sequences / num_positives,
# False: num_sequences / num_negatives
# } if not multitask else None,
# verbose=self.verbose >= 2)
# epoch_train_metrics = self.test(X, y)
# epoch_valid_metrics = self.test(X_valid, y_valid)
# self.train_metrics.append(epoch_train_metrics)
# self.valid_metrics.append(epoch_valid_metrics)
# if self.verbose >= 1:
# print('Epoch {}:'.format(epoch))
# print('Train {}'.format(epoch_train_metrics))
# print('Valid {}'.format(epoch_valid_metrics), end='')
# current_metric = epoch_valid_metrics[early_stopping_metric].mean()
# if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
# if self.verbose >= 1:
# print(' *')
# best_metric = current_metric
# best_epoch = epoch
# early_stopping_wait = 0
# if save_best_model_to_prefix is not None:
# self.save(save_best_model_to_prefix)
# else:
# if self.verbose >= 1:
# print()
# if early_stopping_wait >= early_stopping_patience:
# break
# early_stopping_wait += 1
# if self.verbose >= 1:
# print('Finished training after {} epochs.'.format(epoch))
# if save_best_model_to_prefix is not None:
# print("The best model's architecture and weights (from epoch {0}) "
# 'were saved to {1}.arch.json and {1}.weights.h5'.format(
# best_epoch, save_best_model_to_prefix))
#
# def predict(self, X):
# return self.model.predict(X, batch_size=128, verbose=False)
#
# def get_sequence_filters(self):
# """
# Returns 3D array of 2D sequence filters.
# """
# return self.model.layers[0].get_weights()[0].squeeze(axis=1)
#
# def deeplift(self, X, batch_size=200):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
# """
# assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
# from deeplift.conversion import keras_conversion as kc
#
# # convert to deeplift model and get scoring function
# deeplift_model = kc.convert_sequential_model(self.model, verbose=False)
# score_func = deeplift_model.get_target_contribs_func(
# find_scores_layer_idx=0)
# # use a 40% GC reference
# input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, :, None]]
# # get deeplift scores
# deeplift_scores = np.zeros((self.num_tasks,) + X.shape)
# for i in range(self.num_tasks):
# deeplift_scores[i] = score_func(
# task_idx=i,
# input_data_list=[X],
# batch_size=batch_size,
# progress_update=None,
# input_references_list=input_references)
# return deeplift_scores
#
# def in_silico_mutagenesis(self, X):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) ISM score array.
# """
# mutagenesis_scores = np.empty(X.shape + (self.num_tasks,), dtype=np.float32)
# wild_type_predictions = self.predict(X)
# wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
# np.newaxis]
# for sequence_index, (sequence, wild_type_prediction) in enumerate(
# zip(X, wild_type_predictions)):
# mutated_sequences = np.repeat(
# sequence[np.newaxis], np.prod(sequence.shape), axis=0)
# # remove wild-type
# arange = np.arange(len(mutated_sequences))
# horizontal_cycle = np.tile(
# np.arange(sequence.shape[-1]), sequence.shape[-2])
# mutated_sequences[arange, :, :, horizontal_cycle] = 0
# # add mutant
# vertical_repeat = np.repeat(
# np.arange(sequence.shape[-2]), sequence.shape[-1])
# mutated_sequences[arange, :, vertical_repeat, horizontal_cycle] = 1
# # make mutant predictions
# mutated_predictions = self.predict(mutated_sequences)
# mutated_predictions = mutated_predictions.reshape(sequence.shape +
# (self.num_tasks,))
# mutagenesis_scores[
# sequence_index] = wild_type_prediction - mutated_predictions
# return np.rollaxis(mutagenesis_scores, -1)
#
# @staticmethod
# def _plot_scores(X, output_directory, peak_width, score_func, score_name):
# from dragonn.plot import plot_bases_on_ax
# scores = score_func(X).squeeze(
# axis=2) # (num_task, num_samples, num_bases, sequence_length)
# try:
# os.makedirs(output_directory)
# except OSError:
# pass
# num_tasks = len(scores)
# for task_index, task_scores in enumerate(scores):
# for sequence_index, sequence_scores in enumerate(task_scores):
# # sequence_scores is num_bases x sequence_length
# basewise_max_sequence_scores = sequence_scores.max(axis=0)
# plt.clf()
# figure, (top_axis, bottom_axis) = plt.subplots(2)
# top_axis.plot(
# range(1,
# len(basewise_max_sequence_scores) + 1),
# basewise_max_sequence_scores)
# top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
# peak_position = basewise_max_sequence_scores.argmax()
# top_axis.axvspan(
# peak_position - peak_width,
# peak_position + peak_width,
# color='grey',
# alpha=0.1)
# peak_sequence_scores = sequence_scores[:, peak_position - peak_width:
# peak_position + peak_width].T
# # Set non-max letter_heights to zero
# letter_heights = np.zeros_like(peak_sequence_scores)
# letter_heights[np.arange(len(letter_heights)),
# peak_sequence_scores.argmax(axis=1)] = \
# basewise_max_sequence_scores[peak_position - peak_width :
# peak_position + peak_width]
# plot_bases_on_ax(letter_heights, bottom_axis)
# bottom_axis.set_xticklabels(
# tuple(
# map(str,
# np.arange(peak_position - peak_width,
# peak_position + peak_width + 1))))
# bottom_axis.tick_params(axis='x', labelsize='small')
# plt.xlabel('Position')
# plt.ylabel('Score')
# plt.savefig(
# os.path.join(output_directory, 'sequence_{}{}'.format(
# sequence_index, '_task_{}'.format(task_index)
# if num_tasks > 1 else '')))
# plt.close()
#
# def plot_deeplift(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.deeplift,
# score_name='DeepLift')
#
# def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.in_silico_mutagenesis,
# score_name='ISM')
#
# def plot_architecture(self, output_file):
# from dragonn.visualize_util import plot as plot_keras_model
# plot_keras_model(self.model, output_file, show_shape=True)
#
# def save(self, save_best_model_to_prefix):
# arch_fname = save_best_model_to_prefix + '.arch.json'
# weights_fname = save_best_model_to_prefix + '.weights.h5'
# open(arch_fname, 'w').write(self.model.to_json())
# self.model.save_weights(weights_fname, overwrite=True)
#
# @staticmethod
# def load(arch_fname, weights_fname=None):
# model_json_string = open(arch_fname).read()
# sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
# if weights_fname is not None:
# sequence_dnn.model.load_weights(weights_fname)
# return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
self.model = Sequential()
self.model.add(
GRU(gru_size, return_sequences=True, input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X,
y,
batch_size=128,
nb_epoch=100,
validation_data=validation_data,
class_weight={
True: num_sequences / num_positives,
False: num_sequences / num_negatives
} if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self,
prefix='./gkmSVM',
word_length=11,
mismatches=3,
C=1,
threads=1,
cache_memory=100,
verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str,
(word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
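    # With the default arguments this produces an option string like
    # "-l 11 -d 3 -c 1 -T 1 -m 100 -v 4", which is passed to gkmtrain below.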
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(('gkmtrain', self.options, pos_fname, neg_fname,
self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join([
'gkmpredict', test_fname, self.model_file, temp_ofp.name, threads_option
])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
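# Illustrative (hypothetical) usage of gkmSVM, assuming the LS-GKM command-line
# tools `gkmtrain` and `gkmpredict` are installed and on the PATH; the sequence
# and label variables below are placeholders, not part of this module:
#
#     svm = gkmSVM(prefix='./gkmSVM', word_length=11, mismatches=3, C=1)
#     svm.train(np.array(dna_strings), labels)       # labels: boolean array, one per sequence
#     scores = svm.predict(np.array(test_strings))   # returns an (N, 1) array of decision scores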
| mit |
maaskola/GPy | GPy/examples/dimensionality_reduction.py | 7 | 23628 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as _np
default_seed = 123344
# default_seed = _np.random.seed(123344)
def bgplvm_test_model(optimize=False, verbose=1, plot=False, output_dim=200, nan=False):
"""
model for testing purposes. Samples from a GP with rbf kernel and learns
    the samples with a new kernel. Normally not for optimization, just model checking
"""
import GPy
num_inputs = 13
num_inducing = 5
if plot:
output_dim = 1
input_dim = 3
else:
input_dim = 2
output_dim = output_dim
# generate GPLVM-like data
X = _np.random.rand(num_inputs, input_dim)
lengthscales = _np.random.rand(input_dim)
k = GPy.kern.RBF(input_dim, .5, lengthscales, ARD=True)
K = k.K(X)
Y = _np.random.multivariate_normal(_np.zeros(num_inputs), K, (output_dim,)).T
# k = GPy.kern.RBF_inv(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
# k = GPy.kern.linear(input_dim)# + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
# k = GPy.kern.RBF(input_dim, ARD = False) + GPy.kern.white(input_dim, 0.00001)
# k = GPy.kern.RBF(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.RBF(input_dim, .3, _np.ones(input_dim) * .2, ARD=True)
# k = GPy.kern.RBF(input_dim, .5, 2., ARD=0) + GPy.kern.RBF(input_dim, .3, .2, ARD=0)
# k = GPy.kern.RBF(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.linear(input_dim, _np.ones(input_dim) * .2, ARD=True)
p = .3
m = GPy.models.BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
if nan:
m.inference_method = GPy.inference.latent_function_inference.var_dtc.VarDTCMissingData()
m.Y[_np.random.binomial(1, p, size=(Y.shape)).astype(bool)] = _np.nan
m.parameters_changed()
#===========================================================================
# randomly obstruct data with percentage p
#===========================================================================
# m2 = GPy.models.BayesianGPLVMWithMissingData(Y_obstruct, input_dim, kernel=k, num_inducing=num_inducing)
# m.lengthscales = lengthscales
if plot:
import matplotlib.pyplot as pb
m.plot()
pb.title('PCA initialisation')
# m2.plot()
# pb.title('PCA initialisation')
if optimize:
m.optimize('scg', messages=verbose)
# m2.optimize('scg', messages=verbose)
if plot:
m.plot()
pb.title('After optimisation')
# m2.plot()
# pb.title('After optimisation')
return m
def gplvm_oil_100(optimize=True, verbose=1, plot=True):
import GPy
import pods
data = pods.datasets.oil_100()
Y = data['X']
# create simple GP model
kernel = GPy.kern.RBF(6, ARD=True) + GPy.kern.Bias(6)
m = GPy.models.GPLVM(Y, 6, kernel=kernel)
m.data_labels = data['Y'].argmax(axis=1)
if optimize: m.optimize('scg', messages=verbose)
if plot: m.plot_latent(labels=m.data_labels)
return m
def sparse_gplvm_oil(optimize=True, verbose=0, plot=True, N=100, Q=6, num_inducing=15, max_iters=50):
import GPy
import pods
_np.random.seed(0)
data = pods.datasets.oil()
Y = data['X'][:N]
Y = Y - Y.mean(0)
Y /= Y.std(0)
# Create the model
kernel = GPy.kern.RBF(Q, ARD=True) + GPy.kern.Bias(Q)
m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
m.data_labels = data['Y'][:N].argmax(axis=1)
if optimize: m.optimize('scg', messages=verbose, max_iters=max_iters)
if plot:
m.plot_latent(labels=m.data_labels)
m.kern.plot_ARD()
return m
def swiss_roll(optimize=True, verbose=1, plot=True, N=1000, num_inducing=25, Q=4, sigma=.2):
import GPy
from pods.datasets import swiss_roll_generated
from GPy.models import BayesianGPLVM
data = swiss_roll_generated(num_samples=N, sigma=sigma)
Y = data['Y']
Y -= Y.mean()
Y /= Y.std()
t = data['t']
c = data['colors']
try:
from sklearn.manifold.isomap import Isomap
iso = Isomap().fit(Y)
X = iso.embedding_
if Q > 2:
X = _np.hstack((X, _np.random.randn(N, Q - 2)))
except ImportError:
X = _np.random.randn(N, Q)
if plot:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # @UnusedImport
fig = plt.figure("Swiss Roll Data")
ax = fig.add_subplot(121, projection='3d')
ax.scatter(*Y.T, c=c)
ax.set_title("Swiss Roll")
ax = fig.add_subplot(122)
ax.scatter(*X.T[:2], c=c)
ax.set_title("BGPLVM init")
var = .5
S = (var * _np.ones_like(X) + _np.clip(_np.random.randn(N, Q) * var ** 2,
- (1 - var),
(1 - var))) + .001
Z = _np.random.permutation(X)[:num_inducing]
kernel = GPy.kern.RBF(Q, ARD=True) + GPy.kern.Bias(Q, _np.exp(-2)) + GPy.kern.White(Q, _np.exp(-2))
m = BayesianGPLVM(Y, Q, X=X, X_variance=S, num_inducing=num_inducing, Z=Z, kernel=kernel)
m.data_colors = c
m.data_t = t
if optimize:
m.optimize('bfgs', messages=verbose, max_iters=2e3)
if plot:
fig = plt.figure('fitted')
ax = fig.add_subplot(111)
s = m.input_sensitivity().argsort()[::-1][:2]
ax.scatter(*m.X.mean.T[s], c=c)
return m
def bgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40, max_iters=1000, **k):
import GPy
from matplotlib import pyplot as plt
import numpy as np
_np.random.seed(0)
try:
import pods
data = pods.datasets.oil()
except ImportError:
data = GPy.util.datasets.oil()
kernel = GPy.kern.RBF(Q, 1., 1. / _np.random.uniform(0, 1, (Q,)), ARD=True) # + GPy.kern.Bias(Q, _np.exp(-2))
Y = data['X'][:N]
m = GPy.models.BayesianGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing, **k)
m.data_labels = data['Y'][:N].argmax(axis=1)
if optimize:
m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05)
if plot:
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
m.plot_latent(ax=latent_axes, labels=m.data_labels)
data_show = GPy.plotting.matplot_dep.visualize.vector_show((m.Y[0, :]))
lvm_visualizer = GPy.plotting.matplot_dep.visualize.lvm_dimselect(m.X.mean.values[0:1, :], # @UnusedVariable
m, data_show, latent_axes=latent_axes, sense_axes=sense_axes, labels=m.data_labels)
raw_input('Press enter to finish')
plt.close(fig)
return m
def ssgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40, max_iters=1000, **k):
import GPy
from matplotlib import pyplot as plt
import pods
_np.random.seed(0)
data = pods.datasets.oil()
kernel = GPy.kern.RBF(Q, 1., 1. / _np.random.uniform(0, 1, (Q,)), ARD=True) # + GPy.kern.Bias(Q, _np.exp(-2))
Y = data['X'][:N]
m = GPy.models.SSGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing, **k)
m.data_labels = data['Y'][:N].argmax(axis=1)
if optimize:
m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05)
if plot:
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
m.plot_latent(ax=latent_axes, labels=m.data_labels)
data_show = GPy.plotting.matplot_dep.visualize.vector_show((m.Y[0, :]))
lvm_visualizer = GPy.plotting.matplot_dep.visualize.lvm_dimselect(m.X.mean.values[0:1, :], # @UnusedVariable
m, data_show, latent_axes=latent_axes, sense_axes=sense_axes, labels=m.data_labels)
raw_input('Press enter to finish')
plt.close(fig)
return m
def _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim=False):
"""Simulate some data drawn from a matern covariance and a periodic exponential for use in MRD demos."""
Q_signal = 4
import GPy
import numpy as np
np.random.seed(3000)
k = GPy.kern.Matern32(Q_signal, 1., lengthscale=(np.random.uniform(1, 6, Q_signal)), ARD=1)
for i in range(Q_signal):
k += GPy.kern.PeriodicExponential(1, variance=1., active_dims=[i], period=3., lower=-2, upper=6)
t = np.c_[[np.linspace(-1, 5, N) for _ in range(Q_signal)]].T
K = k.K(t)
s2, s1, s3, sS = np.random.multivariate_normal(np.zeros(K.shape[0]), K, size=(4))[:, :, None]
Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
slist = [sS, s1, s2, s3]
slist_names = ["sS", "s1", "s2", "s3"]
Ylist = [Y1, Y2, Y3]
if plot_sim:
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import itertools
fig = plt.figure("MRD Simulation Data", figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(2, 1, 1)
labls = slist_names
for S, lab in itertools.izip(slist, labls):
ax.plot(S, label=lab)
ax.legend()
for i, Y in enumerate(Ylist):
ax = fig.add_subplot(2, len(Ylist), len(Ylist) + 1 + i)
ax.imshow(Y, aspect='auto', cmap=cm.gray) # @UndefinedVariable
ax.set_title("Y{}".format(i + 1))
plt.draw()
plt.tight_layout()
return slist, [S1, S2, S3], Ylist
def _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim=False):
"""Simulate some data drawn from sine and cosine for use in demos of MRD"""
_np.random.seed(1234)
x = _np.linspace(0, 4 * _np.pi, N)[:, None]
s1 = _np.vectorize(lambda x: _np.sin(x))
s2 = _np.vectorize(lambda x: _np.cos(x) ** 2)
s3 = _np.vectorize(lambda x:-_np.exp(-_np.cos(2 * x)))
sS = _np.vectorize(lambda x: _np.cos(x))
s1 = s1(x)
s2 = s2(x)
s3 = s3(x)
sS = sS(x)
s1 -= s1.mean(); s1 /= s1.std(0)
s2 -= s2.mean(); s2 /= s2.std(0)
s3 -= s3.mean(); s3 /= s3.std(0)
sS -= sS.mean(); sS /= sS.std(0)
Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
slist = [sS, s1, s2, s3]
slist_names = ["sS", "s1", "s2", "s3"]
Ylist = [Y1, Y2, Y3]
if plot_sim:
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import itertools
fig = plt.figure("MRD Simulation Data", figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(2, 1, 1)
labls = slist_names
for S, lab in itertools.izip(slist, labls):
ax.plot(S, label=lab)
ax.legend()
for i, Y in enumerate(Ylist):
ax = fig.add_subplot(2, len(Ylist), len(Ylist) + 1 + i)
ax.imshow(Y, aspect='auto', cmap=cm.gray) # @UndefinedVariable
ax.set_title("Y{}".format(i + 1))
plt.draw()
plt.tight_layout()
return slist, [S1, S2, S3], Ylist
def _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS):
S1 = _np.hstack([s1, sS])
S2 = _np.hstack([s2, s3, sS])
S3 = _np.hstack([s3, sS])
Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
Y1 += .3 * _np.random.randn(*Y1.shape)
Y2 += .2 * _np.random.randn(*Y2.shape)
Y3 += .25 * _np.random.randn(*Y3.shape)
Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0)
Y3 -= Y3.mean(0)
Y1 /= Y1.std(0)
Y2 /= Y2.std(0)
Y3 /= Y3.std(0)
return Y1, Y2, Y3, S1, S2, S3
def bgplvm_simulation(optimize=True, verbose=1,
plot=True, plot_sim=False,
max_iters=2e4,
):
from GPy import kern
from GPy.models import BayesianGPLVM
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0]
k = kern.Linear(Q, ARD=True) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
# k = kern.RBF(Q, ARD=True, lengthscale=10.)
m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
m.X.variance[:] = _np.random.uniform(0, .01, m.X.shape)
m.likelihood.variance = .1
if optimize:
print("Optimizing model:")
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
m.X.plot("BGPLVM Latent Space 1D")
m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
return m
def ssgplvm_simulation(optimize=True, verbose=1,
plot=True, plot_sim=False,
max_iters=2e4, useGPU=False
):
from GPy import kern
from GPy.models import SSGPLVM
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0]
k = kern.Linear(Q, ARD=True) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
# k = kern.RBF(Q, ARD=True, lengthscale=10.)
m = SSGPLVM(Y, Q, init="rand", num_inducing=num_inducing, kernel=k, group_spike=True)
m.X.variance[:] = _np.random.uniform(0, .01, m.X.shape)
m.likelihood.variance = .01
if optimize:
print("Optimizing model:")
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
m.X.plot("SSGPLVM Latent Space 1D")
m.kern.plot_ARD('SSGPLVM Simulation ARD Parameters')
return m
def bgplvm_simulation_missing_data(optimize=True, verbose=1,
plot=True, plot_sim=False,
max_iters=2e4, percent_missing=.1,
):
from GPy import kern
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0]
k = kern.Linear(Q, ARD=True) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
inan = _np.random.binomial(1, percent_missing, size=Y.shape).astype(bool) # 80% missing data
Ymissing = Y.copy()
Ymissing[inan] = _np.nan
m = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
m.Yreal = Y
if optimize:
print("Optimizing model:")
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
m.X.plot("BGPLVM Latent Space 1D")
m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
return m
def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
from GPy import kern
from GPy.models import MRD
D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim)
# Ylist = [Ylist[0]]
k = kern.Linear(Q, ARD=True)
m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernel=k, initx="PCA_concat", initz='permute', **kw)
m['.*noise'] = [Y.var() / 40. for Y in Ylist]
if optimize:
print("Optimizing Model:")
m.optimize(messages=verbose, max_iters=8e3)
if plot:
m.X.plot("MRD Latent Space 1D")
m.plot_scales("MRD Scales")
return m
def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
from GPy import kern
from GPy.models import MRD
D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
# Ylist = [Ylist[0]]
k = kern.Linear(Q, ARD=True)
inanlist = []
for Y in Ylist:
inan = _np.random.binomial(1, .6, size=Y.shape).astype(bool)
inanlist.append(inan)
Y[inan] = _np.nan
m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing,
kernel=k, inference_method=None,
initx="random", initz='permute', **kw)
if optimize:
print("Optimizing Model:")
m.optimize('bfgs', messages=verbose, max_iters=8e3, gtol=.1)
if plot:
m.X.plot("MRD Latent Space 1D")
m.plot_scales("MRD Scales")
return m
def brendan_faces(optimize=True, verbose=True, plot=True):
import GPy
import pods
data = pods.datasets.brendan_faces()
Q = 2
Y = data['Y']
Yn = Y - Y.mean()
Yn /= Yn.std()
m = GPy.models.BayesianGPLVM(Yn, Q, num_inducing=20)
# optimize
if optimize: m.optimize('bfgs', messages=verbose, max_iters=1000)
if plot:
ax = m.plot_latent(which_indices=(0, 1))
y = m.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, order='F', invert=False, scale=False)
lvm = GPy.plotting.matplot_dep.visualize.lvm(m.X.mean[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def olivetti_faces(optimize=True, verbose=True, plot=True):
import GPy
import pods
data = pods.datasets.olivetti_faces()
Q = 2
Y = data['Y']
Yn = Y - Y.mean()
Yn /= Yn.std()
m = GPy.models.BayesianGPLVM(Yn, Q, num_inducing=20)
if optimize: m.optimize('bfgs', messages=verbose, max_iters=1000)
if plot:
ax = m.plot_latent(which_indices=(0, 1))
y = m.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.image_show(y[None, :], dimensions=(112, 92), transpose=False, invert=False, scale=False)
lvm = GPy.plotting.matplot_dep.visualize.lvm(m.X.mean[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def stick_play(range=None, frame_rate=15, optimize=False, verbose=True, plot=True):
import GPy
import pods
data = pods.datasets.osu_run1()
# optimize
if range == None:
Y = data['Y'].copy()
else:
Y = data['Y'][range[0]:range[1], :].copy()
if plot:
y = Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.plotting.matplot_dep.visualize.data_play(Y, data_show, frame_rate)
return Y
def stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
import pods
data = pods.datasets.osu_run1()
# optimize
m = GPy.models.GPLVM(data['Y'], 2, kernel=kernel)
if optimize: m.optimize('bfgs', messages=verbose, max_f_eval=10000)
if plot:
plt.clf
ax = m.plot_latent()
y = m.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
lvm_visualizer = GPy.plotting.matplot_dep.visualize.lvm(m.X[:1, :].copy(), m, data_show, latent_axes=ax)
raw_input('Press enter to finish')
lvm_visualizer.close()
data_show.close()
return m
def bcgplvm_linear_stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
import pods
data = pods.datasets.osu_run1()
# optimize
mapping = GPy.mappings.Linear(data['Y'].shape[1], 2)
m = GPy.models.BCGPLVM(data['Y'], 2, kernel=kernel, mapping=mapping)
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot and GPy.plotting.matplot_dep.visualize.visual_available:
plt.clf
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.plotting.matplot_dep.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
import pods
data = pods.datasets.osu_run1()
# optimize
back_kernel = GPy.kern.RBF(data['Y'].shape[1], lengthscale=5.)
mapping = GPy.mappings.Kernel(X=data['Y'], output_dim=2, kernel=back_kernel)
m = GPy.models.BCGPLVM(data['Y'], 2, kernel=kernel, mapping=mapping)
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot and GPy.plotting.matplot_dep.visualize.visual_available:
plt.clf
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.plotting.matplot_dep.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
# raw_input('Press enter to finish')
return m
def robot_wireless(optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
import pods
data = pods.datasets.robot_wireless()
# optimize
m = GPy.models.BayesianGPLVM(data['Y'], 4, num_inducing=25)
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot:
m.plot_latent()
return m
def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
"""Interactive visualisation of the Stick Man data from Ohio State University with the Bayesian GPLVM."""
from GPy.models import BayesianGPLVM
from matplotlib import pyplot as plt
import numpy as np
import GPy
import pods
data = pods.datasets.osu_run1()
Q = 6
kernel = GPy.kern.RBF(Q, lengthscale=np.repeat(.5, Q), ARD=True)
m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
m.data = data
m.likelihood.variance = 0.001
# optimize
try:
if optimize: m.optimize('bfgs', messages=verbose, max_iters=5e3, bfgs_factor=10)
except KeyboardInterrupt:
print("Keyboard interrupt, continuing to plot and return")
if plot:
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
plt.sca(latent_axes)
m.plot_latent(ax=latent_axes)
y = m.Y[:1, :].copy()
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y, connect=data['connect'])
dim_select = GPy.plotting.matplot_dep.visualize.lvm_dimselect(m.X.mean[:1, :].copy(), m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
fig.canvas.draw()
# Canvas.show doesn't work on OSX.
#fig.canvas.show()
raw_input('Press enter to finish')
return m
def cmu_mocap(subject='35', motion=['01'], in_place=True, optimize=True, verbose=True, plot=True):
import GPy
import pods
data = pods.datasets.cmu_mocap(subject, motion)
if in_place:
# Make figure move in place.
data['Y'][:, 0:3] = 0.0
Y = data['Y']
Y_mean = Y.mean(0)
Y_std = Y.std(0)
m = GPy.models.GPLVM((Y - Y_mean) / Y_std, 2)
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot:
ax = m.plot_latent()
y = m.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.skeleton_show(y[None, :], data['skel'])
lvm_visualizer = GPy.plotting.matplot_dep.visualize.lvm(m.X[0].copy(), m, data_show, latent_axes=ax)
raw_input('Press enter to finish')
lvm_visualizer.close()
data_show.close()
return m
def ssgplvm_simulation_linear():
import numpy as np
import GPy
N, D, Q = 1000, 20, 5
pi = 0.2
def sample_X(Q, pi):
x = np.empty(Q)
dies = np.random.rand(Q)
for q in range(Q):
if dies[q] < pi:
x[q] = np.random.randn()
else:
x[q] = 0.
return x
Y = np.empty((N, D))
X = np.empty((N, Q))
# Generate data from random sampled weight matrices
for n in range(N):
X[n] = sample_X(Q, pi)
w = np.random.randn(D, Q)
Y[n] = np.dot(w, X[n])
| bsd-3-clause |
ssh0/growing-string | triangular_lattice/vicsek/vicsek.py | 1 | 6304 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-05-15
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from triangular import LatticeTriangular as LT
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.animation as animation
import numpy as np
from numpy import linalg as la
import random
import time
rint = random.randint
randm = random.random
class Point:
def __init__(self, id, ix, iy):
self.id, self.x, self.y = id, ix, iy
# vel is unified and the value of it implies the direction of the
# velocity
self.vel = rint(0, 5)
self.priority = randm()
class Main:
def __init__(self, Lx=20, Ly=20, rho=0.9, lattice_scale=10, T=0.4, plot=True,
frames=100):
self.lattice = LT(- np.ones((Lx, Ly), dtype=np.int),
scale=lattice_scale)
self.N = int(Lx * Ly * rho)
self.points = [Point(n, rint(0, Lx - 1), rint(0, Ly - 1)) for n
in range(self.N)]
self.T = T
self.plot = plot
self.beta = 1. / self.T
self.order_param = []
self.num = 0
angs = [i * np.pi / 3. for i in range(6)]
self.velx = [np.cos(ang) for ang in angs]
self.vely = [-np.sin(ang) for ang in angs]
self.u = [np.array([vx, -vy]) for vx, vy in zip(self.velx, self.vely)]
self.lattice_X = self.lattice.coordinates_x
self.lattice_Y = self.lattice.coordinates_y
self.lattice_X = np.array(self.lattice_X).reshape(Lx, Ly)
self.lattice_Y = np.array(self.lattice_Y).reshape(Lx, Ly)
X_min, X_max = np.min(self.lattice_X), np.max(self.lattice_X)
Y_min, Y_max = np.min(self.lattice_Y), np.max(self.lattice_Y)
if self.plot:
self.fig, (self.ax1, self.ax2) = plt.subplots(
1, 2, figsize=(8, 10))
self.ax1.set_xlim([X_min, X_max])
self.ax1.set_ylim([Y_min, Y_max])
self.ax1.set_xticklabels([])
self.ax1.set_yticklabels([])
self.ax1.set_aspect('equal')
self.ax1.set_title("Lattice-Gas model for collective motion")
self.triang = tri.Triangulation(self.lattice_X.flatten(),
self.lattice_Y.flatten())
self.ax1.triplot(self.triang, color='whitesmoke', lw=0.5)
self.l, = self.ax2.plot([], [], 'b-')
self.ax2.set_title(r"Order parameter $m=\frac{1}{N} |\sum \vec{u}_{i}|$ ($T = %.2f$)"
% self.T)
self.ax2.set_ylim([0, 1])
def init_func(*arg):
return self.l,
ani = animation.FuncAnimation(self.fig, self.update, frames=frames,
init_func=init_func,
interval=1, blit=True, repeat=False)
plt.show()
else:
for i in range(100):
self.update(i)
print self.order_param[-1]
def update(self, num):
lowest, upper = {}, []
        # Detect points that occupy the same lattice site
for point in self.points:
if not lowest.has_key((point.x, point.y)):
lowest[(point.x, point.y)] = point
elif lowest[(point.x, point.y)].priority > point.priority:
upper.append(lowest[(point.x, point.y)])
lowest[(point.x, point.y)] = point
else:
upper.append(point)
        # Only the point with the smallest priority value at each site feels the nearest-neighbour interaction (decided by Boltzmann eq)
for point in lowest.values():
            # Sum the velocity vectors of the nearest neighbours
velocities = np.array([0., 0.])
nnx, nny = self.lattice.neighbor_of(point.x, point.y)
for x, y in zip(nnx, nny):
if lowest.has_key((x, y)):
ang = lowest[(x, y)].vel
velocities += np.array([self.velx[ang], -self.vely[ang]])
            # Choose the new direction stochastically according to the Boltzmann distribution
A = [np.exp(self.beta * np.dot(u, velocities)) for u in self.u]
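            # Each direction i is weighted by A[i] ~ exp(beta * u_i . v_nn):
            # directions aligned with the neighbours' summed velocity are more
            # likely, and the loop below samples one direction from these
            # unnormalised weights.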
rand = randm() * sum(A)
p = 0
for i, P in enumerate(A):
p += P
if rand < p:
point.vel = i
break
        # All other points change their direction at random
for point in upper:
# change the velocity of the point
point.vel = rint(0, 5)
        # Update the coordinates and velocity vectors of all points and redraw
self.update_quivers()
        # Plot the order parameter
self.plot_order_param(num)
return self.quiver, self.l
def update_quivers(self):
# Get information to plot
X, Y = [], []
for point in self.points:
# Get possible direction
newx, newy = self.lattice.neighbor_of(point.x, point.y)
# Choose one by its velocity
point.x, point.y = newx[point.vel], newy[point.vel]
X.append(self.lattice_X[point.x, point.y])
Y.append(self.lattice_Y[point.x, point.y])
vel_x = [self.velx[p.vel] for p in self.points]
vel_y = [self.vely[p.vel] for p in self.points]
if self.plot:
self.quiver = self.ax1.quiver(X, Y, vel_x, vel_y,
units='xy', angles='xy', color='k')
def plot_order_param(self, num):
# nwidth = 20
self.order_param.append(self.cal_order_param())
self.num += 1
if self.plot:
nl = max(self.num - 20, 0)
nr = 1.25 * 20 + nl
self.ax2.set_xlim([nl, nr])
self.l.set_data(np.arange(nl, self.num), self.order_param[nl:])
def cal_order_param(self):
# return order parameter
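        # m = |sum_i u_i| / N with unit direction vectors u_i: m is close to 1
        # when all particles move in the same direction and close to 0 for
        # disordered motion.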
velx = sum([self.velx[p.vel] for p in self.points])
vely = sum([self.vely[p.vel] for p in self.points])
return la.norm([velx, vely]) / self.N
if __name__ == '__main__':
main = Main(Lx=40, Ly=40, rho=0.9, T=0.41, frames=300, plot=True)
# main = Main(Lx=40, Ly=40, T=0.6, frames=1000, plot=True)
| mit |
hdmetor/scikit-learn | sklearn/metrics/__init__.py | 52 | 3394 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
PascalSteger/twiddle | act/vis_part_proj_dm.py | 1 | 1231 | #!/usr/bin/env python2
## \file
# Plot raw positions of DM particles
# (c) 2014 ETHZ, Pascal Steger, [email protected]
import sys, matplotlib
matplotlib.use('Agg')
from matplotlib import rc
rc('text',usetex=True)
#import pylab
from pylab import figure,xticks,yticks,grid,savefig
import matplotlib.pyplot as plt
i = len(sys.argv)
if i!=7:
print("usage: vis_part_dm.py xc yc zc r infile outfile")
print("ex: vis_part_dm.py 0.5 0.5 0.5 0.01 dm.dat mov/dm.png")
exit(1)
xc = float(sys.argv[1]); yc = float(sys.argv[2]); zc = float(sys.argv[3]); r = float(sys.argv[4])
infile=sys.argv[5]
outfile=sys.argv[6]
f = open(infile,'r')
x=[];y=[];z=[]
for line in f:
values = line.split()
xcheck = float(values[1])-xc
ycheck = float(values[2])-yc
zcheck = float(values[3])-zc
if(abs(xcheck)<r and abs(ycheck) < r and abs(zcheck)<r):
x.append(xcheck)
y.append(ycheck)
z.append(zcheck)
f.close()
fig = figure()
#xticks([-0.002,-0.001,0.0,0.001,0.002])
#yticks([-0.002,-0.001,0.0,0.001,0.002])
grid()
#ax = fig.add_subplot(111)
#imshow(x,y)
print(len(x))
plt.scatter(x, y,alpha=0.3,marker='d')
plt.xlabel(r'r\quad[{\rm Mpc}/h]')
plt.ylabel(r'r\quad[{\rm Mpc}/h]')
savefig(outfile)
| gpl-2.0 |
seanbechhofer/arduino | python/animated_bar_serial.py | 1 | 1174 | # Reads csv data from the serial line and plots it on a bar chart,
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import numpy as np
import time
import random
import serial
from sys import stdin
port = '/dev/cu.usbserial-AL00U1X7'
ser = serial.Serial(port, 9600)
def animated_barplot():
stuff = ser.readline().split(",")
print stuff
data = []
val = 0
for st in stuff:
try:
val = int(st)
except Exception, e:
val = 0
data.append(val)
plt.xlim(-1,len(data)+1)
plt.ylim(0, 400)
rects = plt.bar(range(len(data)), data, align = 'center', color= 'b', alpha=0.4)
fig.canvas.draw()
while (True):
stuff = ser.readline().split(",")
data = []
for st in stuff:
try:
val = int(st)
except Exception, e:
val = 0
data.append(val)
#print ("tick")
for rect, value in zip(rects,data):
rect.set_height(int(value))
fig.canvas.draw()
fig = plt.figure(figsize=(12,6))
win = fig.canvas.manager.window
win.after(100, animated_barplot)
plt.show()
| mit |
sys-bio/tellurium | tellurium/tests/sedml/test_data.py | 2 | 8722 | """
Testing of SED-ML data support, i.e., DataDescription.
"""
from __future__ import print_function, absolute_import
import os
import pytest
import matplotlib
from tellurium.tests.testdata import TESTDATA_DIR
from tellurium.sedml.data import DataDescriptionParser
from tellurium.sedml.tesedml import SEDMLTools
try:
import libsedml
except ImportError:
import tesedml as libsedml
from tellurium.sedml import tesedml
# ---------------------------------------------------------------------------------
BASE_DIR = os.path.join(TESTDATA_DIR, 'sedml', 'data')
SOURCE_CSV = os.path.join(BASE_DIR, "oscli.csv")
SOURCE_TSV = os.path.join(BASE_DIR, "oscli.tsv")
SOURCE_NUML = os.path.join(BASE_DIR, "./oscli.xml")
SOURCE_NUML_1D = os.path.join(BASE_DIR, "./numlData1D.xml")
SOURCE_NUML_2D = os.path.join(BASE_DIR, "./numlData2D.xml")
SOURCE_NUML_2DRC = os.path.join(BASE_DIR, "./numlData2DRC.xml")
SEDML_READ_CSV = os.path.join(BASE_DIR, "reading-oscli-csv.xml")
SEDML_READ_TSV = os.path.join(BASE_DIR, "reading-oscli-tsv.xml")
SEDML_READ_NUML = os.path.join(BASE_DIR, "reading-oscli-numl.xml")
SEDML_READ_NUML_1D = os.path.join(BASE_DIR, "reading-numlData1D.xml")
SEDML_READ_NUML_2D = os.path.join(BASE_DIR, "reading-numlData2D.xml")
SEDML_READ_NUML_2DRC = os.path.join(BASE_DIR, "reading-numlData2DRC.xml")
OMEX_PLOT_CSV = os.path.join(BASE_DIR, 'omex', "plot_csv.omex")
OMEX_PLOT_CSV_WITH_MODEL = os.path.join(BASE_DIR, 'omex', "plot_csv_with_model.omex")
OMEX_PLOT_NUML = os.path.join(BASE_DIR, 'omex', "plot_numl.omex")
OMEX_PLOT_NUML_WITH_MODEL = os.path.join(BASE_DIR, 'omex', "plot_numl_with_model.omex")
SOURCE_CSV_PARAMETERS = os.path.join(BASE_DIR, "parameters.csv")
SEDML_CSV_PARAMETERS = os.path.join(BASE_DIR, "parameter-from-data-csv.xml")
OMEX_CSV_PARAMETERS = os.path.join(BASE_DIR, 'omex', "parameter_from_data_csv.omex")
OMEX_CSV_JWS_ADLUNG2017_FIG2G = os.path.join(BASE_DIR, 'omex', "jws_adlung2017_fig2g.omex")
# ---------------------------------------------------------------------------------
MPL_BACKEND = None
def setup_module(module):
""" setup any state specific to the execution of the given module."""
global MPL_BACKEND
    # remember the current matplotlib backend and switch to the non-interactive Agg backend
MPL_BACKEND = matplotlib.rcParams['backend']
matplotlib.pyplot.switch_backend("Agg")
def teardown_module(module):
""" teardown any state that was previously setup with a setup_module
method.
"""
matplotlib.pyplot.switch_backend(MPL_BACKEND)
matplotlib.pyplot.close('all')
def test_load_csv():
data = DataDescriptionParser._load_csv(SOURCE_CSV)
assert data is not None
assert data.shape[0] == 200
assert data.shape[1] == 3
def test_load_tsv():
data = DataDescriptionParser._load_tsv(SOURCE_TSV)
assert data is not None
assert data.shape[0] == 200
assert data.shape[1] == 3
def test_load_numl():
data = DataDescriptionParser._load_numl(SOURCE_NUML)
assert data is not None
def test_load_csv_parameters():
data = DataDescriptionParser._load_csv(SOURCE_CSV_PARAMETERS)
assert data is not None
assert data.shape[0] == 10
assert data.shape[1] == 1
def test_load_numl_1D():
data = DataDescriptionParser._load_numl(SOURCE_NUML_1D)
assert data is not None
def test_load_numl_2D():
data = DataDescriptionParser._load_numl(SOURCE_NUML_2D)
assert data is not None
def test_load_numl_2DRC():
    data = DataDescriptionParser._load_numl(SOURCE_NUML_2DRC)
assert data is not None
def parseDataDescriptions(sedml_path):
""" Test helper functions.
Tries to parse all DataDescriptions in the SED-ML file.
"""
print('parseDataDescriptions:', sedml_path)
# load sedml document
assert os.path.exists(sedml_path)
doc_sedml = libsedml.readSedMLFromFile(sedml_path)
SEDMLTools.checkSEDMLDocument(doc_sedml)
# parse DataDescriptions
list_dd = doc_sedml.getListOfDataDescriptions()
# print(list_dd)
# print(len(list_dd))
assert len(list_dd) > 0
for dd in list_dd:
data_sources = DataDescriptionParser.parse(dd, workingDir=BASE_DIR)
assert data_sources is not None
assert type(data_sources) == dict
assert len(data_sources) > 0
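    # note: only the data sources parsed from the last DataDescription are returned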
return data_sources
def test_parse_csv():
data_sources = parseDataDescriptions(SEDML_READ_CSV)
assert "dataTime" in data_sources
assert "dataS1" in data_sources
assert len(data_sources["dataTime"]) == 200
assert len(data_sources["dataS1"]) == 200
def test_parse_csv_parameters():
data_sources = parseDataDescriptions(SEDML_CSV_PARAMETERS)
assert "dataIndex" in data_sources
assert "dataMu" in data_sources
assert len(data_sources["dataIndex"]) == 10
assert len(data_sources["dataMu"]) == 10
def test_parse_tsv():
data_sources = parseDataDescriptions(SEDML_READ_TSV)
assert "dataTime" in data_sources
assert "dataS1" in data_sources
assert len(data_sources["dataTime"]) == 200
assert len(data_sources["dataS1"]) == 200
def test_parse_numl():
data_sources = parseDataDescriptions(SEDML_READ_NUML)
assert "dataTime" in data_sources
assert "dataS1" in data_sources
assert len(data_sources["dataTime"]) == 200
assert len(data_sources["dataS1"]) == 200
def test_parse_numl_1D():
data_sources = parseDataDescriptions(SEDML_READ_NUML_1D)
assert data_sources is not None
assert len(data_sources) == 6
assert 'data_s_glu' in data_sources
assert 'data_s_pyr' in data_sources
assert 'data_s_acetate' in data_sources
assert 'data_s_acetald' in data_sources
assert 'data_s_EtOH' in data_sources
assert 'data_x' in data_sources
assert len(data_sources['data_s_glu']) == 1
def test_parse_numl_2D():
data_sources = parseDataDescriptions(SEDML_READ_NUML_2D)
assert data_sources is not None
assert len(data_sources) == 4
assert 'dataBL' in data_sources
assert 'dataB' in data_sources
assert 'dataS1' in data_sources
assert 'dataTime' in data_sources
assert len(data_sources['dataB']) == 6
def test_parse_numl_2DRC():
data_sources = parseDataDescriptions(SEDML_READ_NUML_2DRC)
assert data_sources is not None
assert len(data_sources) == 4
assert 'dataBL' in data_sources
assert 'dataB' in data_sources
assert 'dataS1' in data_sources
assert 'dataTime' in data_sources
assert len(data_sources['dataB']) == 6
def test_omex_plot_csv(tmpdir):
results = tesedml.executeCombineArchive(OMEX_PLOT_CSV, workingDir=str(tmpdir))
result = list(results.values())[0]
dg_dict = result['dataGenerators']
assert len(dg_dict) == 2
assert "dgDataS1" in dg_dict
assert "dgDataTime" in dg_dict
assert len(dg_dict["dgDataS1"]) == 200
assert len(dg_dict["dgDataTime"]) == 200
def test_omex_plot_csv_with_model(tmpdir):
results = tesedml.executeCombineArchive(OMEX_PLOT_CSV_WITH_MODEL, workingDir=str(tmpdir))
result = list(results.values())[0]
dg_dict = result['dataGenerators']
assert len(dg_dict) == 5
assert "dgDataS1" in dg_dict
assert "dgDataTime" in dg_dict
assert len(dg_dict["dgDataS1"]) == 200
assert len(dg_dict["dgDataTime"]) == 200
def test_omex_plot_numl(tmpdir):
results = tesedml.executeCombineArchive(OMEX_PLOT_NUML, workingDir=str(tmpdir))
result = list(results.values())[0]
dg_dict = result['dataGenerators']
assert len(dg_dict) == 2
assert "dgDataS1" in dg_dict
assert "dgDataTime" in dg_dict
assert len(dg_dict["dgDataS1"]) == 200
assert len(dg_dict["dgDataTime"]) == 200
def test_omex_plot_numl_with_model(tmpdir):
results = tesedml.executeCombineArchive(OMEX_PLOT_NUML_WITH_MODEL, workingDir=str(tmpdir))
result = list(results.values())[0]
dg_dict = result['dataGenerators']
assert len(dg_dict) == 5
assert "dgDataS1" in dg_dict
assert "dgDataTime" in dg_dict
assert len(dg_dict["dgDataS1"]) == 200
assert len(dg_dict["dgDataTime"]) == 200
def test_omex_jws_adlung2017_fig2gl(tmpdir):
results = tesedml.executeCombineArchive(OMEX_CSV_JWS_ADLUNG2017_FIG2G, workingDir=str(tmpdir))
result = list(results.values())[0]
dg_dict = result['dataGenerators']
assert len(dg_dict) == 40
@pytest.mark.skip("Not supported in L1V3, will be part of L1V4")
def test_omex_csv_parameters(tmpdir):
results = tesedml.executeCombineArchive(OMEX_CSV_PARAMETERS, workingDir=str(tmpdir))
result = list(results.values())[0]
dgs = result['dataGenerators']
dg_dict = list(dgs.values())[0]
assert len(dg_dict) == 2
assert "dgDataIndex" in dg_dict
assert "dgDataMu" in dg_dict
assert len(dg_dict["dgDataIndex"]) == 10
assert len(dg_dict["dgDataMu"]) == 10
| apache-2.0 |
macks22/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
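# fraction of the data used for training at each heldout setting (x-axis of the plot)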
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
DonBeo/statsmodels | statsmodels/examples/ex_kernel_regression2.py | 34 | 1511 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
y_true = np.sin(x*5)/x + 2*x
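    # observations: the true curve plus heteroskedastic noise that grows with |3 + x|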
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
model = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls',
defaults=nparam.EstimatorSettings(efficient=True))
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
model1 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls')
mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='ll',
var_type='c', bw='cv_ls')
mean2, mfx2 = model2.fit()
print(model.bw)
print(model1.bw)
print(model2.bw)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
    ax.plot(x, sm_mean, lw=2, label='kernel mean (lc)')
    ax.plot(x, mean2, lw=2, label='kernel mean (ll)')
ax.legend()
plt.show()
| bsd-3-clause |
aalmah/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py | 44 | 5013 | from __future__ import print_function
import nose
import unittest
import numpy as np
from theano.compat.six.moves import xrange
import theano
from .localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
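        # number of filter-module positions per row for a 'valid' convolution at this stride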
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print('Loading van Hateren images')
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print('Patches shape', patches.shape, self.n_patches, patches5.shape)
# 2. Set up an autoencoder
print('Setting up autoencoder')
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print('Cost', ii, cost_ii)
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
| bsd-3-clause |
olebole/astrometry.net | blind/test_tweak_plots.py | 2 | 5115 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import os.path
from pylab import *
from numpy import *
# test_tweak 2>tt.py
import tt
def savefig(fn):
from pylab import savefig as sf
print('Saving', fn)
sf(fn)
if __name__ == '__main__':
#print 'me:', __file__
#tt = os.path.join(os.path.dirname(__file__), 'test_tweak')
for run in [2,3]:
tanxy = getattr(tt, 'origxy_%i' % run)
xy = getattr(tt, 'xy_%i' % run)
noisyxy = getattr(tt, 'noisyxy_%i' % run)
gridx = getattr(tt, 'gridx_%i' % run)
gridy = getattr(tt, 'gridy_%i' % run)
truesip_a = getattr(tt, 'truesip_a_%i' % run)
truesip_b = getattr(tt, 'truesip_b_%i' % run)
sip_a = getattr(tt, 'sip_a_%i' % run)
sip_b = getattr(tt, 'sip_b_%i' % run)
x0,y0 = tt.x0, tt.y0
truedxy = xy - tanxy
obsdxy = noisyxy - tanxy
xlo,xhi = -500, 2500
ylo,yhi = -500, 2500
X1 = linspace(xlo, xhi, 100)
Y1 = gridy
X1,Y1 = meshgrid(X1,Y1)
X1 = X1.T
Y1 = Y1.T
X2 = gridx
Y2 = linspace(ylo, yhi, 100)
X2,Y2 = meshgrid(X2,Y2)
truesipx_x = zeros_like(X1)
truesipy_x = zeros_like(X1)
truesipx_y = zeros_like(Y2)
truesipy_y = zeros_like(Y2)
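        # evaluate the true SIP distortion polynomials along both families of grid lines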
for xo,yo,c in truesip_a:
truesipx_y += c * (X2 - x0)**xo * (Y2 - y0)**yo
truesipx_x += c * (X1 - x0)**xo * (Y1 - y0)**yo
for xo,yo,c in truesip_b:
truesipy_y += c * (X2 - x0)**xo * (Y2 - y0)**yo
truesipy_x += c * (X1 - x0)**xo * (Y1 - y0)**yo
x = xy[:,0]
y = xy[:,1]
truedx = truedxy[:,0]
truedy = truedxy[:,1]
obsdx = obsdxy[:,0]
obsdy = obsdxy[:,1]
for order in range(2,6):
clf()
sipx_x = zeros_like(X1)
sipy_x = zeros_like(X1)
sipx_y = zeros_like(Y2)
sipy_y = zeros_like(Y2)
for xo,yo,c in sip_a[order]:
sipx_y += c * (X2 - x0)**xo * (Y2 - y0)**yo
sipx_x += c * (X1 - x0)**xo * (Y1 - y0)**yo
for xo,yo,c in sip_b[order]:
sipy_y += c * (X2 - x0)**xo * (Y2 - y0)**yo
sipy_x += c * (X1 - x0)**xo * (Y1 - y0)**yo
subplot(2,2,1)
plot(x, truedx, 'bs', mec='b', mfc='None')
plot(x, obsdx, 'r.')
plot(X1, -truesipx_x, 'b-', alpha=0.2)
plot(X1, -sipx_x, 'r-', alpha=0.2)
xlabel('x')
ylabel('dx')
xlim(xlo, xhi)
subplot(2,2,2)
plot(x, truedy, 'bs', mec='b', mfc='None')
plot(x, obsdy, 'r.')
plot(X1, -truesipy_x, 'b-', alpha=0.2)
plot(X1, -sipy_x, 'r-', alpha=0.2)
xlabel('x')
ylabel('dy')
xlim(xlo, xhi)
subplot(2,2,3)
plot(y, truedx, 'bs', mec='b', mfc='None')
plot(y, obsdx, 'r.')
plot(Y2, -truesipx_y, 'b-', alpha=0.2)
plot(Y2, -sipx_y, 'r-', alpha=0.2)
xlabel('y')
ylabel('dx')
xlim(xlo, xhi)
subplot(2,2,4)
plot(y, truedy, 'bs', mec='b', mfc='None')
plot(y, obsdy, 'r.')
plot(Y2, -truesipy_y, 'b-', alpha=0.2)
plot(Y2, -sipy_y, 'r-', alpha=0.2)
xlabel('y')
ylabel('dy')
xlim(xlo, xhi)
savefig('tt%i-%i.png' % (run, order))
clf()
subplot(111)
plot(tanxy[:,0], tanxy[:,1], 'b.')
plot(noisyxy[:,0], noisyxy[:,1], 'r.')
plot(xy[:,0], xy[:,1], 'bo', mec='b', mfc='None')
if False:
X3,Y3 = meshgrid(linspace(xlo, xhi, 11),
linspace(ylo, yhi, 11))
truesipx = X3
truesipy = Y3
for xo,yo,c in truesip_a:
truesipx += c * (X3 - x0)**xo * (Y3 - y0)**yo
for xo,yo,c in truesip_b:
truesipy += c * (X3 - x0)**xo * (Y3 - y0)**yo
sipx = X3
sipy = Y3
for xo,yo,c in sip_a[order]:
sipx += c * (X3 - x0)**xo * (Y3 - y0)**yo
for xo,yo,c in sip_b[order]:
sipy += c * (X3 - x0)**xo * (Y3 - y0)**yo
plot(truesipx, truesipy, 'bs', mec='b', mfc='None')
plot(sipx, sipy, 'ms', mec='m', mfc='None')
plot(X1, Y1, 'g-', alpha=0.25)
plot(X2, Y2, 'g-', alpha=0.25)
plot(truesipx_x + X1, truesipy_x + Y1, 'b-', alpha=0.25)
plot(truesipx_y + X2, truesipy_y + Y2, 'b-', alpha=0.25)
plot(sipx_x + X1, sipy_x + Y1, 'r-', alpha=0.25)
plot(sipx_y + X2, sipy_y + Y2, 'r-', alpha=0.25)
xlim(xlo,xhi)
ylim(ylo,yhi)
savefig('ttxy%i-%i.png' % (run,order))
| bsd-3-clause |
dismalpy/dismalpy | dismalpy/ssm/tests/test_univariate.py | 1 | 8811 | """
Tests for univariate treatment of multivariate models
TODO skips the tests for measurement disturbance and measurement disturbance
covariance, which do not pass. The univariate smoother *appears* to be
correctly implemented against Durbin and Koopman (2012) chapter 6, yet still
gives a different answer from the conventional smoother. It's not clear if
this is intended (i.e. it has to be at least slightly different, since the
conventional smoother can return a non-diagonal covariance matrix whereas the
univariate smoother must return a diagonal covariance matrix).
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from dismalpy import ssm
import dismalpy.ssm.tests.results_kalman as results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class TestClark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, alternate_timing=False, **kwargs):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
k_states = 6
self.model = ssm.Model(data, k_states=k_states, **kwargs)
# Statespace representation
self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
self.model.transition[
([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])
] = [1, 1, 0, 0, 1, 1, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'],
)
self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.obs_cov[1, 1, 0] = sigma_ec**2
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
# Initialization: self.modification
if not alternate_timing:
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
else:
self.model.timing_init_filtered = True
self.model.initialize_known(initial_state, initial_state_cov)
# Conventional filtering, smoothing, and simulation smoothing
self.model.filter_conventional = True
self.conventional_results = self.model.smooth()
n_disturbance_variates = (
(self.model.k_endog + self.model.k_posdef) * self.model.nobs
)
self.conventional_sim = self.model.simulation_smoother(
disturbance_variates=np.zeros(n_disturbance_variates),
initial_state_variates=np.zeros(self.model.k_states)
)
# Univariate filtering, smoothing, and simulation smoothing
self.model.filter_univariate = True
self.univariate_results = self.model.smooth()
self.univariate_sim = self.model.simulation_smoother(
disturbance_variates=np.zeros(n_disturbance_variates),
initial_state_variates=np.zeros(self.model.k_states)
)
def test_using_univariate(self):
# Regression test to make sure the univariate_results actually
# used the univariate Kalman filtering approach (i.e. that the flag
# being set actually caused the filter to not use the conventional
# filter)
assert not self.conventional_results.filter_univariate
assert self.univariate_results.filter_univariate
assert_allclose(
self.conventional_results.forecasts_error_cov[1,1,0],
143.03724478030821
)
assert_allclose(
self.univariate_results.forecasts_error_cov[1,1,0],
120.66208525029386
)
def test_forecasts(self):
assert_almost_equal(
self.conventional_results.forecasts[0,:],
self.univariate_results.forecasts[0,:], 9
)
def test_forecasts_error(self):
assert_almost_equal(
self.conventional_results.forecasts_error[0,:],
self.univariate_results.forecasts_error[0,:], 9
)
def test_forecasts_error_cov(self):
assert_almost_equal(
self.conventional_results.forecasts_error_cov[0,0,:],
self.univariate_results.forecasts_error_cov[0,0,:], 9
)
def test_filtered_state(self):
assert_almost_equal(
self.conventional_results.filtered_state,
self.univariate_results.filtered_state, 8
)
def test_filtered_state_cov(self):
assert_almost_equal(
self.conventional_results.filtered_state_cov,
self.univariate_results.filtered_state_cov, 9
)
def test_predicted_state(self):
assert_almost_equal(
self.conventional_results.predicted_state,
self.univariate_results.predicted_state, 8
)
def test_predicted_state_cov(self):
assert_almost_equal(
self.conventional_results.predicted_state_cov,
self.univariate_results.predicted_state_cov, 9
)
def test_loglike(self):
assert_allclose(
self.conventional_results.llf_obs,
self.univariate_results.llf_obs
)
def test_smoothed_states(self):
assert_almost_equal(
self.conventional_results.smoothed_state,
self.univariate_results.smoothed_state, 8
)
def test_smoothed_states_cov(self):
assert_almost_equal(
self.conventional_results.smoothed_state_cov,
self.univariate_results.smoothed_state_cov, 6
)
@SkipTest
def test_smoothed_measurement_disturbance(self):
assert_almost_equal(
self.conventional_results.smoothed_measurement_disturbance,
self.univariate_results.smoothed_measurement_disturbance, 9
)
@SkipTest
def test_smoothed_measurement_disturbance_cov(self):
assert_almost_equal(
self.conventional_results.smoothed_measurement_disturbance_cov,
self.univariate_results.smoothed_measurement_disturbance_cov, 9
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.conventional_results.smoothed_state_disturbance,
self.univariate_results.smoothed_state_disturbance,
atol=1e-7
)
def test_smoothed_state_disturbance_cov(self):
assert_almost_equal(
self.conventional_results.smoothed_state_disturbance_cov,
self.univariate_results.smoothed_state_disturbance_cov, 9
)
def test_simulation_smoothed_state(self):
assert_almost_equal(
self.conventional_sim.simulated_state,
self.univariate_sim.simulated_state, 9
)
def test_simulation_smoothed_measurement_disturbance(self):
assert_almost_equal(
self.conventional_sim.simulated_measurement_disturbance,
self.univariate_sim.simulated_measurement_disturbance, 9
)
def test_simulation_smoothed_state_disturbance(self):
assert_almost_equal(
self.conventional_sim.simulated_state_disturbance,
self.univariate_sim.simulated_state_disturbance, 9
)
class TestClark1989Alternate(TestClark1989):
def __init__(self, *args, **kwargs):
super(TestClark1989Alternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alternate(self):
assert(self.model._kalman_filter.filter_timing == 1)
| bsd-2-clause |
pv/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
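    # grid resolution for shading the predicted class-probability surface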
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
pbhalesain/kafka | system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
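            # parse check: raises (and aborts plotting for this file) if the first data row does not start with an integer timestamp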
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
# build all dashboards for the input entity's based on its role. It can be one of kafka, zookeeper, producer
# consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
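    # flatten the graphs from every dashboard for this role and collect the distinct mbean names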
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
fatadama/estimation | challenge_problem/trials/enkf_trials.py | 1 | 8275 | """@package enkf_trials
loads data, passes through ensemble Kalman Filter
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import sys
import time
import scipy.stats as stats
sys.path.append('../')
import cp_dynamics
sys.path.append('../../filters/python/enkf')
sys.path.append('../../filters/python/lib')
import enkf
sys.path.append('../sim_data')
import data_loader
import trials_processing
def eqom_enkf(x,t,u,v):
return cp_dynamics.eqom_stoch(x,t,v)
def enkf_test(dt,tf,mux0,P0,YK,Qk,Rk,flag_adapt=False):
global nameBit
# measurement influence matrix
Hk = np.array([ [1.0,0.0] ])
if nameBit == 1:
eqom_use = eqom_enkf
if nameBit == 2:
eqom_use = eqom_enkf
if nameBit == 3:
eqom_use = eqom_enkf
if flag_adapt:
ENKF = enkf.adaptive_enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)
else:
# create nonadaptive EnKF object
ENKF = enkf.enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)
nSteps = int(tf/dt)+1
ts = 0.0
#initialize EnKF
ENKF.init(mux0,P0,ts)
xf = np.zeros((nSteps,2))
Pf = np.zeros((nSteps,4))
Nf = np.zeros(nSteps)
XK = np.zeros((nSteps,2,ENKF._N))
tk = np.arange(0.0,tf,dt)
#get the mean and covariance estimates
Nf[0] = ENKF.get_N()
xf[0,:] = np.mean(ENKF.xk,axis=1)
Pxx = np.zeros((2,2))
for k in range(ENKF.get_N()):
Pxx = Pxx + 1.0/(1.0+float(ENKF._N))*np.outer(ENKF.xk[:,k]-xf[0,:],ENKF.xk[:,k]-xf[0,:])
Pf[0,:] = Pxx.reshape((4,))
t1 = time.time()
for k in range(1,nSteps):
# get the new measurement
ym = np.array([YK[k]])
ts = ts + dt
# sync the ENKF, with continuous-time integration
# propagate filter
ENKF.propagateOde(dt)
#ENKF.propagate(dt)
# update
ENKF.update(ym)
# resample ??
#ENKF.resample()
# log
xf[k,:] = np.mean(ENKF.xk,axis=1)
Pxx = np.zeros((2,2))
for kj in range(ENKF.get_N()):
Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,kj]-xf[k,:],ENKF.xk[:,kj]-xf[k,:])
Pf[k,:] = Pxx.reshape((4,))
Nf[k] = ENKF.get_N()
if not flag_adapt:
XK[k,:,:] = ENKF.xk.copy()
t2 = time.time()
print("Elapsed time: %f sec" % (t2-t1))
return(xf,Pf,Nf,XK)
def main():
global nameBit
names = ['sims_10_fast']
#names = ['sims_01_slow','sims_01_medium','sims_10_slow','sims_10_medium','sims_11_slow','sims_11_medium']# test case
flag_adapt = False
for namecounter in range(len(names)):
nameNow = names[namecounter]
(tsim,XK,YK,mu0,P0,Ns,dt,tf) = data_loader.load_data(nameNow,'../sim_data/')
Ns = 1
nameBit = int(nameNow[5:7],2)
# parse the name
if nameBit == 1:
# tuned noise levels for the ENKF with white noise forcing
if dt > 0.9:# slow sampling
Qk = np.array([[1.0]])
elif dt > 0.09:# medium sampling
Qk = np.array([[0.1]])
else:# fast sampling
Qk = np.array([[0.001]])
Rk = np.array([[1.0]])
if nameBit == 2:
# tuned noise levels for the ENKF with cosine forcing
if dt > 0.9:# slow sampling
Qk = np.array([[6.0]])
elif dt > 0.09:# medium sampling
Qk = np.array([[30.0]])
else:# fast sampling
Qk = np.array([[100.0]])
Rk = np.array([[1.0]])
if nameBit == 3:
# tuned noise levels for the ENKF with cosine forcing and white noise
if dt > 0.9:# slow sampling
Qk = np.array([[5.0]])
elif dt > 0.09:# medium sampling
Qk = np.array([[20.0]])
else:# fast sampling
Qk = np.array([[160.0]])
Rk = np.array([[1.0]])
# number of steps in each simulation
print(Qk[0,0])
nSteps = len(tsim)
nees_history = np.zeros((nSteps,Ns))
Nf_history = np.zeros((nSteps,Ns))
e_sims = np.zeros((Ns*nSteps,2))
for counter in range(Ns):
xk = XK[:,(2*counter):(2*counter+2)]
yk = YK[:,counter]
(xf,Pf,Nf,XKO) = enkf_test(dt,tf,mu0,P0,yk,Qk,Rk,flag_adapt)
# store the number of particles, relevant if adaptive
Nf_history[:,counter] = Nf.copy()
# compute the unit variance transformation of the error
e1 = np.zeros((nSteps,2))
chi2 = np.zeros(nSteps)
for k in range(nSteps):
P = Pf[k,:].reshape((2,2))
Pinv = np.linalg.inv(P)
chi2[k] = np.dot(xk[k,:]-xf[k,:],np.dot(Pinv,xk[k,:]-xf[k,:]))
# chi2 is the NEES statistic. Take the mean
nees_history[:,counter] = chi2.copy()
mean_nees = np.sum(chi2)/float(nSteps)
print(mean_nees)
# mean NEES
mse = np.sum(np.power(xk-xf,2.0),axis=0)/float(nSteps)
e_sims[(counter*nSteps):(counter*nSteps+nSteps),:] = xk-xf
print("MSE: %f,%f" % (mse[0],mse[1]))
print("ENKF sim %d/%d case %d/%d" % (counter+1,Ns,namecounter+1,len(names)))
if Ns < 2:
fig1 = plt.figure()
ax = []
for k in range(6):
if k < 2:
nam = 'x' + str(k+1)
elif k < 4:
nam = 'e' + str(k-1)
else:
nam = 'xp' + str(k-3)
ax.append(fig1.add_subplot(3,2,k+1,ylabel=nam))
if k < 2:
ax[k].plot(tsim,xk[:,k],'b-')
ax[k].plot(tsim,xf[:,k],'m--')
if k == 0:
ax[k].plot(tsim,yk,'r--')
elif k < 4:
ax[k].plot(tsim,xk[:,k-2]-xf[:,k-2])
ax[k].plot(tsim,3.0*np.sqrt(Pf[:,3*(k-2)]),'r--')
ax[k].plot(tsim,-3.0*np.sqrt(Pf[:,3*(k-2)]),'r--')
else:
ax[k].plot(tsim,xk[:,k-4],'b-')
ax[k].plot(tsim,XKO[:,k-4,:],'d')
ax[k].grid()
fig1.show()
if flag_adapt:
trials_processing.errorParsing(e_sims,nees_history,'aenkf',nameNow)
else:
trials_processing.errorParsing(e_sims,nees_history,'enkf',nameNow)
mse_tot = np.mean(np.power(e_sims,2.0),axis=0)
print("mse_tot: %f,%f" % (mse_tot[0],mse_tot[1]))
# get the mean NEES value versus simulation time across all sims
nees_mean = np.sum(nees_history,axis=1)/Ns
# get the mean number of particles in time
Nf_mean = np.sum(Nf_history,axis=1)/Ns
		# get 95% confidence bounds for chi-squared... the df is the number of sims times the dimension of the state
chiUpper = stats.chi2.ppf(.975,2.0*Ns)/float(Ns)
chiLower = stats.chi2.ppf(.025,2.0*Ns)/float(Ns)
# plot the mean NEES with the 95% confidence bounds
fig2 = plt.figure(figsize=(6.0,3.37)) #figsize tuple is width, height
if flag_adapt:
tilt = "AENKF, Ts = %.2f, %d sims, " % (dt, Ns)
else:
tilt = "ENKF, Ts = %.2f, %d sims, " % (dt, Ns)
if nameBit == 0:
tilt = tilt + 'unforced'
if nameBit == 1:
#white-noise only
tilt = tilt + 'white-noise forcing'
if nameBit == 2:
tilt = tilt + 'cosine forcing'
if nameBit == 3:
#white-noise and cosine forcing
tilt = tilt + 'white-noise and cosine forcing'
ax = fig2.add_subplot(111,ylabel='mean NEES',title=tilt)
ax.plot(tsim,chiUpper*np.ones(nSteps),'r--')
ax.plot(tsim,chiLower*np.ones(nSteps),'r--')
ax.plot(tsim,nees_mean,'b-')
ax.grid()
fig2.show()
# save the figure
if flag_adapt:
fig2.savefig('nees_aenkf_' + nameNow + '.png')
else:
fig2.savefig('nees_enkf_' + nameNow + '.png')
# find fraction of inliers
l1 = (nees_mean < chiUpper).nonzero()[0]
l2 = (nees_mean > chiLower).nonzero()[0]
# get number of inliers
len_in = len(set(l1).intersection(l2))
# get number of super (above) liers (sic)
len_super = len((nees_mean > chiUpper).nonzero()[0])
# get number of sub-liers (below)
len_sub = len((nees_mean < chiLower).nonzero()[0])
print("Conservative (below 95%% bounds): %f" % (float(len_sub)/float(nSteps)))
print("Optimistic (above 95%% bounds): %f" % (float(len_super)/float(nSteps)))
# save metrics
if flag_adapt:
FID = open('metrics_aenkf_' + nameNow + '.txt','w')
else:
FID = open('metrics_enkf_' + nameNow + '.txt','w')
FID.write("mse1,mse2,nees_below95,nees_above95\n")
FID.write("%f,%f,%f,%f\n" % (mse_tot[0],mse_tot[1],float(len_sub)/float(nSteps),float(len_super)/float(nSteps)))
FID.close()
# plot the mean number of particles
if flag_adapt:
fig = plt.figure(figsize=(6.0,3.37)) #figsize tuple is width, height
tilt = "AENKF, Ts = %.2f, %d sims, " % (dt, Ns)
if nameBit == 0:
tilt = tilt + 'unforced'
if nameBit == 1:
#white-noise only
tilt = tilt + 'white-noise forcing'
if nameBit == 2:
tilt = tilt + 'cosine forcing'
if nameBit == 3:
#white-noise and cosine forcing
tilt = tilt + 'white-noise and cosine forcing'
ax = fig.add_subplot(111,ylabel='mean particles',title=tilt)
ax.plot(tsim,Nf_mean,'b-')
ax.grid()
fig.show()
# save the figure
fig.savefig('Nf_aenkf_' + nameNow + '.png')
raw_input("Return to exit")
return
if __name__ == "__main__":
main() | gpl-2.0 |
jamlamberti/bogo_probe | learner/naive_bayes.py | 1 | 1051 | """An Naive Bayes Implementation"""
from sklearn.naive_bayes import GaussianNB
from .learner import Learner
class NaiveBayes(Learner):
"""Naive Bayes Wrapper"""
def __init__(self):
super(NaiveBayes, self).__init__()
self.classifier = GaussianNB()
self.log.debug("Naive Bayes classifier initialized.")
def train(self, train_x, train_y):
"""
Train Naive Bayes classifier
"""
self.log.info("Training Naive Bayes classifier")
self.classifier.fit(train_x, train_y)
self.log.info("Done training Naive Bayes classifier")
def predict(self, test_x):
"""
Return predicted class labels
"""
self.log.info("Computing Naive Bayes predictions")
return self.classifier.predict(test_x)
def predict_proba(self, test_x):
"""
Return predicted probabilities from Naive Bayes classifier
"""
self.log.info("Computing Naive Bayes probabilities")
return self.classifier.predict_proba(test_x)
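# Minimal usage sketch (hypothetical, not part of this module): X_train,
# y_train and X_test stand in for array-like features/labels shaped the way
# scikit-learn expects; nothing below is executed on import.
#
#     clf = NaiveBayes()
#     clf.train(X_train, y_train)
#     labels = clf.predict(X_test)
#     probabilities = clf.predict_proba(X_test)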
| gpl-3.0 |
bnaul/scikit-learn | sklearn/model_selection/_split.py | 2 | 81255 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from collections.abc import Iterable
import warnings
from itertools import chain, combinations
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
from inspect import signature
import numpy as np
from scipy.special import comb
from ..utils import indexable, check_random_state, _safe_indexing
from ..utils import _approximate_mode
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _deprecate_positional_args
from ..utils.multiclass import type_of_target
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(metaclass=ABCMeta):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= 1:
raise ValueError(
'Cannot perform LeaveOneOut with n_samples={}.'.format(
n_samples)
)
return range(n_samples)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets. Must be strictly less than the number of
samples.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= self.p:
raise ValueError(
'p={} must be strictly less than the number of '
'samples={}'.format(self.p, n_samples)
)
for combination in combinations(range(n_samples), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
@_deprecate_positional_args
def __init__(self, n_splits, *, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
if not shuffle and random_state is not None: # None is the default
raise ValueError(
'Setting a random_state has no effect since shuffle is '
'False. You should leave '
'random_state to its default (None), or set shuffle=True.',
)
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: n_samples={1}.")
.format(self.n_splits, n_samples))
for train, test in super().split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
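
# Illustration (not part of the original module): the _BaseKFold constructor
# validates its arguments up front, so misconfigured splitters fail fast
# before any data is touched. A hedged sketch of the expected behaviour:
#
# >>> KFold(n_splits=1)                                   # doctest: +SKIP
# ValueError: k-fold cross-validation requires at least one train/test split...
# >>> KFold(n_splits=3, shuffle=False, random_state=0)    # doctest: +SKIP
# ValueError: Setting a random_state has no effect since shuffle is False...
# >>> KFold(n_splits=3, shuffle=True, random_state=0)     # valid configuration
# KFold(n_splits=3, random_state=0, shuffle=True)
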
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle the data before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int or RandomState instance, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold. Otherwise, this
parameter has no effect.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf)
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
@_deprecate_positional_args
def __init__(self, n_splits=5, *, shuffle=False,
random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle,
random_state=random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
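
# Worked illustration (not part of the original module) of the fold-size
# arithmetic above: with n_samples=10 and n_splits=3, ``fold_sizes`` starts
# as [3, 3, 3] (10 // 3) and the first 10 % 3 = 1 fold is enlarged by one,
# giving test folds of sizes [4, 3, 3]:
#
# >>> [len(test) for _, test in KFold(n_splits=3).split(np.zeros((10, 1)))]
# [4, 3, 3]
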
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=5):
super().__init__(n_splits, shuffle=False, random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
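
# Worked illustration (not part of the original module) of the greedy
# assignment in GroupKFold._iter_test_indices above: groups are sorted by
# size (largest first) and each is placed into the currently lightest fold.
# With group sizes {A: 4, B: 3, C: 2, D: 1} and n_splits=2, A goes to fold 0
# (weight 4), B to fold 1 (weight 3), C to fold 1 (weight 5), then D to
# fold 0 (weight 5), so the test folds are {A, D} and {B, C}, 5 samples each:
#
# >>> groups = ['A'] * 4 + ['B'] * 3 + ['C'] * 2 + ['D']
# >>> cv = GroupKFold(n_splits=2)
# >>> [len(test) for _, test in cv.split(np.zeros((10, 1)), groups=groups)]
# [5, 5]                                                  # doctest: +SKIP
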
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int or RandomState instance, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf)
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
The implementation is designed to:
* Generate test sets such that all contain the same distribution of
classes, or as close as possible.
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Preserve order dependencies in the dataset ordering, when
``shuffle=False``: all samples from class k in some test set were
contiguous in y, or separated in y by samples from classes other than k.
* Generate test sets where the smallest and largest differ by at most one
sample.
.. versionchanged:: 0.22
The previous implementation did not follow the last constraint.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
@_deprecate_positional_args
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle,
random_state=random_state)
def _make_test_folds(self, X, y=None):
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ('binary', 'multiclass')
if type_of_target_y not in allowed_target_types:
raise ValueError(
'Supported target types are: {}. Got {!r} instead.'.format(
allowed_target_types, type_of_target_y))
y = column_or_1d(y)
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
# y_inv encodes y according to lexicographic order. We invert y_idx to
# map the classes so that they are encoded by order of appearance:
# 0 represents the first label appearing in y, 1 the second, etc.
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("n_splits=%d cannot be greater than the"
" number of members in each class."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (min_groups, self.n_splits)), UserWarning)
# Determine the optimal number of samples from each class in each fold,
        # using round robin over the sorted y. (This can be done directly from
        # counts, but that code is unreadable.)
y_order = np.sort(y_encoded)
allocation = np.asarray(
[np.bincount(y_order[i::self.n_splits], minlength=n_classes)
for i in range(self.n_splits)])
# To maintain the data order dependencies as best as possible within
# the stratification constraint, we assign samples from each class in
# blocks (and then mess that up when shuffle=True).
test_folds = np.empty(len(y), dtype='i')
for k in range(n_classes):
# since the kth column of allocation stores the number of samples
# of class k in each test set, this generates blocks of fold
# indices corresponding to the allocation for class k.
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super().split(X, y, groups)
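
# Worked illustration (not part of the original module) of the round-robin
# allocation in _make_test_folds above: with y = [0, 0, 0, 0, 1, 1, 1] and
# n_splits=2, the sorted y is dealt out as y_order[0::2] = [0, 0, 1, 1] and
# y_order[1::2] = [0, 0, 1], so test fold 0 receives 2 samples of each class
# (size 4) and test fold 1 receives 2 + 1 samples (size 3); class proportions
# are preserved and fold sizes differ by at most one:
#
# >>> y = [0, 0, 0, 0, 1, 1, 1]
# >>> cv = StratifiedKFold(n_splits=2)
# >>> [len(test) for _, test in cv.split(np.zeros((7, 1)), y)]
# [4, 3]                                                  # doctest: +SKIP
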
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
.. versionadded:: 0.18
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than in the previous split,
    and thus shuffling in the cross-validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as the train set and the
    (k+1)th fold as the test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
max_train_size : int, default=None
Maximum size for a single training set.
test_size : int, default=None
Used to limit the size of the test set. Defaults to
``n_samples // (n_splits + 1)``, which is the maximum allowed value
with ``gap=0``.
gap : int, default=0
Number of samples to exclude from the end of each train set before
the test set.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> tscv = TimeSeriesSplit()
>>> print(tscv)
TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
TRAIN: [0 1 2 3] TEST: [4]
TRAIN: [0 1 2 3 4] TEST: [5]
>>> # Fix test_size to 2 with 12 samples
>>> X = np.random.randn(12, 2)
>>> y = np.random.randint(0, 2, 12)
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0 1 2 3 4 5] TEST: [6 7]
TRAIN: [0 1 2 3 4 5 6 7] TEST: [8 9]
TRAIN: [0 1 2 3 4 5 6 7 8 9] TEST: [10 11]
>>> # Add in a 2 period gap
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0 1 2 3] TEST: [6 7]
TRAIN: [0 1 2 3 4 5] TEST: [8 9]
TRAIN: [0 1 2 3 4 5 6 7] TEST: [10 11]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)`` by default,
where ``n_samples`` is the number of samples.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_size=None,
test_size=None,
gap=0):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_size = max_train_size
self.test_size = test_size
self.gap = gap
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
gap = self.gap
test_size = self.test_size if self.test_size is not None \
else n_samples // n_folds
# Make sure we have enough samples for the given split parameters
if n_folds > n_samples:
raise ValueError(
(f"Cannot have number of folds={n_folds} greater"
f" than the number of samples={n_samples}."))
if n_samples - gap - (test_size * n_splits) <= 0:
raise ValueError(
(f"Too many splits={n_splits} for number of samples"
f"={n_samples} with test_size={test_size} and gap={gap}."))
indices = np.arange(n_samples)
test_starts = range(n_samples - n_splits * test_size,
n_samples, test_size)
for test_start in test_starts:
train_end = test_start - gap
if self.max_train_size and self.max_train_size < train_end:
yield (indices[train_end - self.max_train_size:train_end],
indices[test_start:test_start + test_size])
else:
yield (indices[:train_end],
indices[test_start:test_start + test_size])
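
# Illustration (not part of the original module): ``max_train_size`` caps the
# training window, turning the default expanding-window scheme into a rolling
# window. A hedged sketch with 6 samples and n_splits=3 (test_size defaults
# to 6 // 4 = 1):
#
# >>> tscv = TimeSeriesSplit(n_splits=3, max_train_size=3)
# >>> for train, test in tscv.split(np.zeros((6, 1))):    # doctest: +SKIP
# ...     print(train, test)
# [0 1 2] [3]
# [1 2 3] [4]
# [2 3 4] [5]
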
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> logo.get_n_splits(groups=groups) # 'groups' is always required
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return len(np.unique(groups))
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned to the same group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> lpgo.get_n_splits(groups=groups) # 'groups' is always required
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
            raise ValueError(
                "The groups parameter contains fewer than (or equal to) "
                "n_groups (%d) unique groups (%s). LeavePGroupsOut "
                "expects that at least n_groups + 1 (%d) unique groups be "
                "present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class _RepeatedSplits(metaclass=ABCMeta):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int or RandomState instance, default=None
Passes `random_state` to the arbitrary repeating cross validator.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
@_deprecate_positional_args
def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, numbers.Integral):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of length n_samples
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
def __repr__(self):
return _build_repr(self)
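
# Illustration (not part of the original module): _RepeatedSplits only
# requires that the wrapped splitter accept ``shuffle`` and ``random_state``;
# RepeatedKFold and RepeatedStratifiedKFold below are thin wrappers of
# exactly this form, and get_n_splits multiplies the base splitter's count
# by ``n_repeats``. A hedged sketch:
#
# >>> RepeatedKFold(n_splits=2, n_repeats=3).get_n_splits()  # doctest: +SKIP
# 6
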
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int or RandomState instance, default=None
Controls the randomness of each repeated cross-validation instance.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
@_deprecate_positional_args
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
KFold, n_repeats=n_repeats,
random_state=random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int or RandomState instance, default=None
Controls the generation of the random states for each repetition.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
@_deprecate_positional_args
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
StratifiedKFold, n_repeats=n_repeats, random_state=random_state,
n_splits=n_splits)
class BaseShuffleSplit(metaclass=ABCMeta):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
@_deprecate_positional_args
def __init__(self, n_splits=10, *, test_size=None, train_size=None,
random_state=None):
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._default_test_size = 0.1
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState instance, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
5
>>> print(rs)
ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
TRAIN: [1 3 0 4] TEST: [5 2]
TRAIN: [4 0 2 5] TEST: [1 3]
TRAIN: [1 2 4 0] TEST: [3 5]
TRAIN: [3 4 1 0] TEST: [5 2]
TRAIN: [3 5 1 0] TEST: [2 4]
>>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
TRAIN: [1 3 0] TEST: [5 2]
TRAIN: [4 0 2] TEST: [1 3]
TRAIN: [1 2 4] TEST: [3 5]
TRAIN: [3 4 1] TEST: [5 2]
TRAIN: [3 5 1] TEST: [2 4]
"""
@_deprecate_positional_args
def __init__(self, n_splits=10, *, test_size=None, train_size=None,
random_state=None):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self._default_test_size = 0.1
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(
n_samples, self.test_size, self.train_size,
default_test_size=self._default_test_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
    """Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int, default=5
Number of re-shuffling & splitting iterations.
test_size : float, int, default=0.2
If float, should be between 0.0 and 1.0 and represent the proportion
of groups to include in the test split (rounded up). If int,
represents the absolute number of test groups. If None, the value is
set to the complement of the train size.
        If ``train_size`` is also None, it will be set to 0.2.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState instance, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupShuffleSplit
>>> X = np.ones(shape=(8, 2))
>>> y = np.ones(shape=(8, 1))
>>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3])
>>> print(groups.shape)
(8,)
>>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
>>> gss.get_n_splits()
2
>>> for train_idx, test_idx in gss.split(X, y, groups):
... print("TRAIN:", train_idx, "TEST:", test_idx)
TRAIN: [2 3 4 5 6 7] TEST: [0 1]
TRAIN: [0 1 5 6 7] TEST: [2 3 4]
    """
@_deprecate_positional_args
def __init__(self, n_splits=5, *, test_size=None, train_size=None,
random_state=None):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self._default_test_size = 0.2
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super()._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
return super().split(X, y, groups)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState instance, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
5
>>> print(sss)
StratifiedShuffleSplit(n_splits=5, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [5 2 3] TEST: [4 1 0]
TRAIN: [5 1 4] TEST: [0 2 3]
TRAIN: [5 0 2] TEST: [4 3 1]
TRAIN: [4 1 0] TEST: [2 3 5]
TRAIN: [0 5 1] TEST: [3 4 2]
"""
@_deprecate_positional_args
def __init__(self, n_splits=10, *, test_size=None, train_size=None,
random_state=None):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self._default_test_size = 0.1
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(
n_samples, self.test_size, self.train_size,
default_test_size=self._default_test_size)
if y.ndim == 2:
# for multi-label y, map each distinct row to a string repr
# using join because str(row) uses an ellipsis if len(row) > 1000
y = np.array([' '.join(row.astype('str')) for row in y])
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
            raise ValueError('The train_size = %d should be greater than or '
                             'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
            raise ValueError('The test_size = %d should be greater than or '
                             'equal to the number of classes = %d' %
(n_test, n_classes))
# Find the sorted list of instances for each class:
# (np.unique above performs a sort, so code is O(n logn) already)
class_indices = np.split(np.argsort(y_indices, kind='mergesort'),
np.cumsum(class_counts)[:-1])
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation,
mode='clip')
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,) or (n_samples, n_labels)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super().split(X, y, groups)
def _validate_shuffle_split(n_samples, test_size, train_size,
default_test_size=None):
"""
    Validation helper to check if the train/test sizes are meaningful with
    regard to the size of the data (n_samples).
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (test_size_type == 'i' and (test_size >= n_samples or test_size <= 0)
or test_size_type == 'f' and (test_size <= 0 or test_size >= 1)):
raise ValueError('test_size={0} should be either positive and smaller'
' than the number of samples {1} or a float in the '
'(0, 1) range'.format(test_size, n_samples))
if (train_size_type == 'i' and (train_size >= n_samples or train_size <= 0)
or train_size_type == 'f' and (train_size <= 0 or train_size >= 1)):
raise ValueError('train_size={0} should be either positive and smaller'
' than the number of samples {1} or a float in the '
'(0, 1) range'.format(train_size, n_samples))
if train_size is not None and train_size_type not in ('i', 'f'):
raise ValueError("Invalid value for train_size: {}".format(train_size))
if test_size is not None and test_size_type not in ('i', 'f'):
raise ValueError("Invalid value for test_size: {}".format(test_size))
if (train_size_type == 'f' and test_size_type == 'f' and
train_size + test_size > 1):
raise ValueError(
'The sum of test_size and train_size = {}, should be in the (0, 1)'
' range. Reduce test_size and/or train_size.'
.format(train_size + test_size))
if test_size_type == 'f':
n_test = ceil(test_size * n_samples)
elif test_size_type == 'i':
n_test = float(test_size)
if train_size_type == 'f':
n_train = floor(train_size * n_samples)
elif train_size_type == 'i':
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
'With n_samples={}, test_size={} and train_size={}, the '
'resulting train set will be empty. Adjust any of the '
'aforementioned parameters.'.format(n_samples, test_size,
train_size)
)
return n_train, n_test
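
# Worked illustration (not part of the original module) of the size
# resolution above: with n_samples=10, test_size=0.25 and train_size=None,
# n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, so the helper
# returns (7, 3). With integer sizes, e.g. test_size=3 and train_size=5,
# the values are used as-is and (5, 3) is returned after checking that
# 5 + 3 does not exceed n_samples.
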
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Provides train/test indices to split data into train/test sets using a
predefined scheme specified by the user with the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
.. versionadded:: 0.16
Parameters
----------
test_fold : array-like of shape (n_samples,)
The entry ``test_fold[i]`` represents the index of the test set that
sample ``i`` belongs to. It is possible to exclude sample ``i`` from
any test set (i.e. include sample ``i`` in every training set) by
setting ``test_fold[i]`` equal to -1.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps)
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
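
# Illustration (not part of the original module): check_cv (below) wraps an
# explicit iterable of (train, test) index pairs in _CVIterableWrapper so
# that it exposes the usual split/get_n_splits interface. A hedged sketch:
#
# >>> folds = [(np.array([0, 1]), np.array([2])),
# ...          (np.array([2]), np.array([0, 1]))]
# >>> wrapped = _CVIterableWrapper(folds)
# >>> wrapped.get_n_splits()                              # doctest: +SKIP
# 2
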
@_deprecate_positional_args
def check_cv(cv=5, y=None, *, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
    cv : int, cross-validation generator or an iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
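
# Illustration (not part of the original module) of check_cv's dispatch
# rules, as a hedged sketch:
#
# >>> check_cv(5, y=np.array([0, 1, 0, 1]), classifier=True)  # doctest: +SKIP
# StratifiedKFold(n_splits=5, random_state=None, shuffle=False)
# >>> check_cv(5, y=np.array([0.1, 0.2, 0.3]), classifier=True)  # continuous y
# KFold(n_splits=5, random_state=None, shuffle=False)
# >>> check_cv(KFold(3))   # CV splitter instances pass through unchanged
# KFold(n_splits=3, random_state=None, shuffle=False)
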
def train_test_split(*arrays,
test_size=None,
train_size=None,
random_state=None,
shuffle=True,
stratify=None):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation,
    ``next(ShuffleSplit().split(X, y))``, and application to input data
    into a single call for splitting (and optionally subsampling) data
    in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState instance, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
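    A stratified split (small sketch): with two test samples and two balanced
    classes, the test set receives exactly one sample of each class.
    >>> y = [0, 0, 0, 1, 1, 1]
    >>> X = np.arange(12).reshape((6, 2))
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=2, stratify=y, random_state=0)
    >>> sorted(y_test)
    [0, 1]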
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(n_samples, test_size, train_size,
default_test_size=0.25)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for "
"shuffle=False")
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=n_test,
train_size=n_train,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((_safe_indexing(a, train),
_safe_indexing(a, test)) for a in arrays))
# Tell nose that train_test_split is not a test.
# (Needed for external libraries that may use nose.)
# Use setattr to avoid mypy errors when monkeypatching.
setattr(train_test_split, '__test__', False)
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", FutureWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if value is None and hasattr(self, 'cvargs'):
value = self.cvargs.get(key, None)
if len(w) and w[0].category == FutureWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
par2/lamana-test | lamana/models/Wilson_LT.py | 1 | 9624 | #------------------------------------------------------------------------------
# Class-style model
# Users can define classes for custom laminate theory models.
# Additionally, users can define custom defaults.
import math
import collections as ct
import pandas as pd
from lamana.input_ import BaseDefaults
from lamana.theories import BaseModel
from lamana.lt_exceptions import IndeterminateError
class Model(BaseModel):
    '''A modified laminate theory for circular biaxial flexure disks made of
    two distinct materials (a polymer and a ceramic), loaded with a flat
    piston punch on a 3-ball support.'''
    # DEV: accept extra args and kwds here.
def __init__(self):
self.Laminate = None
self.FeatureInput = None
self.LaminateModel = None
def _use_model_(self, Laminate, adjusted_z=False):
        '''Return updated DataFrame and FeatureInput; raise on invalid parameters.
Variables
=========
df : DataFrame
LaminateModel with IDs and Dimensional Variables.
        FeatureInput : dict
Geometry, laminate parameters and more. Updates Globals dict for
parameters in the dashboard output.
adjusted_z: bool; default=False
If True, uses z(m)* values instead; different assumption for internal calc.
'''
self.Laminate = Laminate
df = Laminate.LFrame.copy()
FeatureInput = Laminate.FeatureInput
# Dev-defined Exception Handling
if (FeatureInput['Parameters']['r'] == 0):
raise ZeroDivisionError('r=0 is invalid for log in the moment eqn.')
elif (FeatureInput['Parameters']['a'] == 0):
raise ZeroDivisionError('a=0 is invalid for log in the moment eqn.')
elif (FeatureInput['Parameters']['r'] < 0) | (FeatureInput['Parameters']['a'] < 0):
raise ValueError('Negative numbers are invalid for the log term '
'in moment eqn.')
elif FeatureInput['Parameters']['a'] > FeatureInput['Parameters']['R']:
raise ValueError('Support radius is larger than sample radius.')
elif df['side'].str.contains('INDET').any():
print('INDET value found. Rolling back...')
raise IndeterminateError('INDET value found. Unable to accurately calculate stress.')
#raise AssertionError('Indeterminate value found. Unable to accurately calculate stress.')
# Calling functions to calculate Qs and Ds
df.loc[:,'Q_11'] = self.calc_stiffness(df, FeatureInput['Properties']).q_11
df.loc[:,'Q_12'] = self.calc_stiffness(df, FeatureInput['Properties']).q_12
df.loc[:,'D_11'] = self.calc_bending(df, adj_z=adjusted_z).d_11
df.loc[:,'D_12'] = self.calc_bending(df, adj_z=adjusted_z).d_12
        # Global Variable Update: total bending stiffnesses D_11T, D_12T (summed over
        # all rows when p == 1 and nplies is even; otherwise over interface rows only)
if (FeatureInput['Parameters']['p'] == 1) & (Laminate.nplies%2 == 0):
D_11T = sum(df['D_11'])
D_12T = sum(df['D_12'])
else:
D_11T = sum(df.loc[df['label'] == 'interface','D_11']) # total D11
D_12T = sum(df.loc[df['label'] == 'interface','D_12'])
#print(FeatureInput['Geometric']['p'])
        D_11p = (1./((D_11T**2 - D_12T**2))*D_11T)   # inverted bending stiffness terms,
        D_12n = -(1./((D_11T**2 - D_12T**2))*D_12T)  # used to map moments to curvatures
v_eq = D_12T/D_11T # equiv. Poisson's ratio
M_r = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_r
M_t = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_t
K_r = (D_11p*M_r) + (D_12n*M_t) # curvatures
K_t = (D_12n*M_r) + (D_11p*M_t)
# Update FeatureInput
global_params = {'D_11T': D_11T,
'D_12T': D_12T,
'D_11p': D_11p,
'D_12n': D_12n,
                         'v_eq': v_eq,
'M_r': M_r,
'M_t': M_t,
'K_r': K_r,
                         'K_t': K_t,
}
FeatureInput['Globals'] = global_params
self.FeatureInput = FeatureInput # update with Globals
#print(FeatureInput)
# Calculate Strains and Stresses and Update DataFrame
df.loc[:,'strain_r'] = K_r * df.loc[:, 'Z(m)']
df.loc[:,'strain_t'] = K_t * df.loc[:, 'Z(m)']
df.loc[:, 'stress_r (Pa/N)'] = (df.loc[:,'strain_r'] * df.loc[:, 'Q_11']
) + (df.loc[:,'strain_t'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_t (Pa/N)'] = (df.loc[:,'strain_t'] * df.loc[:,'Q_11']
) + (df.loc[:,'strain_r'] * df.loc[:,'Q_12'])
df.loc[:,'stress_f (MPa/N)'] = df.loc[:,'stress_t (Pa/N)']/1e6
del df['Modulus']
del df['Poissons']
self.LaminateModel = df
return (df, FeatureInput)
#------------------------------------------------------------------------------
    # DEV: prefer staticmethods here. Add formulas to doc strings.
def calc_stiffness(self, df, mat_props):
'''Return tuple of Series of (Q11, Q12) floats per lamina.'''
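        # Plane-stress reduced stiffnesses, as implemented below:
        #   Q_11 = E / (1 - v**2),    Q_12 = v * E / (1 - v**2)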
# Iterate to Apply Modulus and Poisson's to correct Material
        # DEV: prefer cleaner ways to parse materials from mat_props.
df_mat_props = pd.DataFrame(mat_props) # df easier to munge
df_mat_props.index.name = 'materials'
for material in df_mat_props.index:
#for material in mat_props.index:
mat_idx = df['matl'] == material
df.loc[mat_idx, 'Modulus'] = df_mat_props.loc[material, 'Modulus']
df.loc[mat_idx, 'Poissons'] = df_mat_props.loc[material, 'Poissons']
E = df['Modulus'] # series of moduli
v = df['Poissons']
stiffness = ct.namedtuple('stiffness', ['q_11', 'q_12'])
q_11 = E/(1-(v**2))
q_12 = (v*E)/(1-(v**2))
return stiffness(q_11, q_12)
def calc_bending(self, df, adj_z=False):
'''Return tuple of Series of (D11, D12) floats.'''
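        # Per-lamina bending stiffnesses (parallel-axis form), as implemented below:
        #   D_11 = Q_11*h**3/12 + Q_11*h*z**2,    D_12 = Q_12*h**3/12 + Q_12*h*z**2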
q_11 = df['Q_11']
q_12 = df['Q_12']
h = df['h(m)']
if not adj_z:
z = df['z(m)']
else:
z = df['z(m)*']
bending = ct.namedtuple('bending', ['d_11', 'd_12'])
d_11 = ((q_11*(h**3))/12.) + (q_11*h*(z**2))
d_12 = ((q_12*(h**3))/12.) + (q_12*h*(z**2))
        return bending(d_11, d_12)
def calc_moment(self, df, load_params, v_eq):
'''Return tuple of moments (radial and tangential); floats.
        See Timoshenko & Woinowsky-Krieger, Eq. 91 (default).'''
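        # Moments per unit load, as implemented below:
        #   M_r = (P_a/(4*pi)) * (1 + v_eq) * log10(a/r)
        #   M_t = (P_a/(4*pi)) * ((1 + v_eq) * log10(a/r) + (1 - v_eq))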
P_a = load_params['P_a']
a = load_params['a']
r = load_params['r']
moments = ct.namedtuple('moments', ['m_r', 'm_t'])
m_r = ((P_a/(4*math.pi))*((1+v_eq)*math.log10(a/r)))
m_t = ((P_a/(4*math.pi))*(((1+v_eq)*math.log10(a/r))+(1-v_eq)))
return moments(m_r, m_t)
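#------------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module; defined but never
# called). The ``la.distributions.Case`` workflow is assumed from lamana's
# documented quickstart and may differ between versions.
def _example_case(geo_string='400-[200]-800'):
    '''Return an applied Case built with this module's Defaults; sketch only.'''
    import lamana as la                           # assumed top-level package name
    dft = Defaults()                              # Defaults is defined below in this module
    case = la.distributions.Case(dft.load_params, dft.mat_props)
    case.apply([geo_string])                      # 'Wilson_LT' assumed as the default model
    return case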
class Defaults(BaseDefaults):
'''Return parameters for building distributions cases. Useful for consistent
testing.
Dimensional defaults are inherited from utils.BaseDefaults().
    Material-specific parameters are defined here by the user.
- Default geometric and materials parameters
- Default FeatureInputs
Examples
========
    >>> dft = Defaults()
    >>> dft.load_params
    {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 5, 'P_a' : 1, 'r' : 2e-4,}
    >>> dft.mat_props
    {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
    'Poissons': {'HA': 0.25, 'PSu': 0.33}}
    >>> dft.FeatureInput
    {'Geometry' : '400-[200]-800',
    'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 5, 'P_a' : 1, 'r' : 2e-4,},
'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom' : None,
'Model' : Wilson_LT,
}
'''
def __init__(self):
BaseDefaults.__init__(self)
'''DEV: Add defaults first. Then adjust attributes.'''
# DEFAULTS ------------------------------------------------------------
# Build dicts of geometric and material parameters
self.load_params = {'R' : 12e-3, # specimen radius
'a' : 7.5e-3, # support ring radius
'p' : 5, # points/layer
'P_a' : 1, # applied load
'r' : 2e-4, # radial distance from center loading
}
self.mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
# ATTRIBUTES ----------------------------------------------------------
# FeatureInput
self.FeatureInput = self.get_FeatureInput(self.Geo_objects['standard'][0],
load_params=self.load_params,
mat_props=self.mat_props,
##custom_matls=None,
model='Wilson_LT',
global_vars=None) | bsd-3-clause |