repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jorge2703/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
    ax = fig.add_subplot(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
        # dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
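The file above benchmarks only the running time of the exact and randomized SVDs. A minimal sketch of the accuracy side of the same comparison is below; it is not taken from the benchmark and assumes a current scikit-learn, where `make_low_rank_matrix` is imported from `sklearn.datasets` rather than the older `sklearn.datasets.samples_generator` path used in the file.

```python
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd

# same kind of data as the benchmark: mostly rank-50 with a modest tail
X = make_low_rank_matrix(500, 500, effective_rank=50, tail_strength=0.2,
                         random_state=0)

# exact rank-50 truncation as the reference
U, s, Vt = svd(X, full_matrices=False)
reference = (U[:, :50] * s[:50]) @ Vt[:50]

# randomized SVD with and without power iterations
for n_iter in (0, 3):
    Ur, sr, Vtr = randomized_svd(X, n_components=50, n_iter=n_iter,
                                 random_state=0)
    approx = (Ur * sr) @ Vtr
    err = np.linalg.norm(reference - approx) / np.linalg.norm(reference)
    print("n_iter=%d: relative error vs exact rank-50 SVD = %.2e" % (n_iter, err))
```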
zingale/pyro2 | examples/multigrid/mg_test_vc_periodic.py | 1 | 5829 | #!/usr/bin/env python3
"""
Test the variable-coefficient MG solver with periodic data.
Here we solve::
div . ( alpha grad phi ) = f
with::
alpha = 2.0 + cos(2.0*pi*x)*cos(2.0*pi*y)
f = -16.0*pi**2*(cos(2*pi*x)*cos(2*pi*y) + 1)*sin(2*pi*x)*sin(2*pi*y)
This has the exact solution::
phi = sin(2.0*pi*x)*sin(2.0*pi*y)
on [0,1] x [0,1]
We use Dirichlet BCs on phi. For alpha, we do not have to impose the
same BCs, since that may represent a different physical quantity.
Here we take alpha to have Neumann BCs. (Dirichlet BCs for alpha will
force it to 0 on the boundary, which is not correct here)
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import compare
import mesh.boundary as bnd
import mesh.patch as patch
import multigrid.variable_coeff_MG as MG
from util import msg, io
# the analytic solution
def true(x, y):
return np.sin(2.0*np.pi*x)*np.sin(2.0*np.pi*y)
# the coefficients
def alpha(x, y):
return 2.0 + np.cos(2.0*np.pi*x)*np.cos(2.0*np.pi*y)
# the righthand side
def f(x, y):
return -16.0*np.pi**2*(np.cos(2*np.pi*x)*np.cos(2*np.pi*y) + 1) * \
np.sin(2*np.pi*x)*np.sin(2*np.pi*y)
def test_vc_poisson_periodic(N, store_bench=False, comp_bench=False,
make_plot=False, verbose=1, rtol=1.e-12):
"""
test the variable-coefficient MG solver. The return value
here is the error compared to the exact solution, UNLESS
comp_bench=True, in which case the return value is the
error compared to the stored benchmark
"""
# test the multigrid solver
nx = N
ny = nx
# create the coefficient variable
g = patch.Grid2d(nx, ny, ng=1)
d = patch.CellCenterData2d(g)
bc_c = bnd.BC(xlb="periodic", xrb="periodic",
ylb="periodic", yrb="periodic")
d.register_var("c", bc_c)
d.create()
c = d.get_var("c")
c[:, :] = alpha(g.x2d, g.y2d)
# check whether the RHS sums to zero (necessary for periodic data)
rhs = f(g.x2d, g.y2d)
print("rhs sum: {}".format(np.sum(rhs[g.ilo:g.ihi+1, g.jlo:g.jhi+1])))
# create the multigrid object
a = MG.VarCoeffCCMG2d(nx, ny,
xl_BC_type="periodic", yl_BC_type="periodic",
xr_BC_type="periodic", yr_BC_type="periodic",
coeffs=c, coeffs_bc=bc_c,
verbose=verbose, vis=0, true_function=true)
# initialize the solution to 0
a.init_zeros()
# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)
# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
# alternately, we can just use smoothing by uncommenting the following
# a.smooth(a.nlevels-1,10000)
# get the solution
v = a.get_solution()
# get the true solution
b = true(a.x2d, a.y2d)
# compute the error from the analytic solution -- note that with
# periodic BCs all around, there is nothing to normalize the
# solution. We subtract off the average of phi from the MG
# solution (we do the same for the true solution to put them on
# the same footing)
e = v - np.sum(v.v())/(nx*ny) - (b - np.sum(b[a.ilo:a.ihi+1, a.jlo:a.jhi+1])/(nx*ny))
enorm = e.norm()
print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
(enorm, a.relative_error, a.num_cycles))
# plot the solution
if make_plot:
plt.clf()
plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')
plt.subplot(121)
img1 = plt.imshow(np.transpose(v.v()),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
plt.xlabel("x")
plt.ylabel("y")
plt.title("nx = {}".format(nx))
plt.colorbar(img1)
plt.subplot(122)
img2 = plt.imshow(np.transpose(e.v()),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
plt.xlabel("x")
plt.ylabel("y")
plt.title("error")
plt.colorbar(img2)
plt.tight_layout()
plt.savefig("mg_vc_periodic_test.png")
# store the output for later comparison
bench = "mg_vc_poisson_periodic"
bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"
my_data = a.get_solution_object()
if store_bench:
my_data.write("{}/{}".format(bench_dir, bench))
# do we do a comparison?
if comp_bench:
compare_file = "{}/{}".format(bench_dir, bench)
msg.warning("comparing to {}".format(compare_file))
bench = io.read(compare_file)
result = compare.compare(my_data, bench, rtol)
if result == 0:
msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
else:
msg.warning("ERROR: {}\n".format(compare.errors[result]))
return result
# normal return -- error wrt true solution
return enorm
if __name__ == "__main__":
N = [16, 32, 64, 128, 256, 512]
err = []
plot = False
store = False
do_compare = False
for nx in N:
if nx == max(N):
plot = True
enorm = test_vc_poisson_periodic(nx, make_plot=plot,
store_bench=store, comp_bench=do_compare)
err.append(enorm)
# plot the convergence
N = np.array(N, dtype=np.float64)
err = np.array(err)
plt.clf()
plt.loglog(N, err, "x", color="r")
plt.loglog(N, err[0]*(N[0]/N)**2, "--", color="k")
plt.xlabel("N")
plt.ylabel("error")
fig = plt.gcf()
fig.set_size_inches(7.0, 6.0)
plt.tight_layout()
plt.savefig("mg_vc_periodic_converge.png")
| bsd-3-clause |
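The docstring of the test above states the coefficient, the right-hand side, and the exact solution; the claim that f really equals div(alpha grad phi) can be checked symbolically. This is a small sketch assuming sympy is available; it is not part of pyro2.

```python
import sympy as sp

x, y = sp.symbols("x y")
phi = sp.sin(2*sp.pi*x) * sp.sin(2*sp.pi*y)                 # exact solution
alpha = 2 + sp.cos(2*sp.pi*x) * sp.cos(2*sp.pi*y)           # coefficient

# div( alpha grad phi )
lhs = sp.diff(alpha*sp.diff(phi, x), x) + sp.diff(alpha*sp.diff(phi, y), y)

# right-hand side quoted in the docstring
f = -16*sp.pi**2*(sp.cos(2*sp.pi*x)*sp.cos(2*sp.pi*y) + 1) * \
    sp.sin(2*sp.pi*x)*sp.sin(2*sp.pi*y)

print(sp.simplify(lhs - f))   # prints 0 if the manufactured solution is consistent
```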
DTUWindEnergy/Python4WindEnergy | py4we/we_file_io.py | 1 | 4845 | """ IO classes for the DTU FileType class
Copyright (C) 2013 DTU Wind Energy
Authors: Pierre-Elouan Rethore, Mads Moelgaard Pedersen
Email: [email protected], [email protected]
Last revision: 9/10/2013
License: Apache v2.0, http://www.apache.org/licenses/LICENSE-2.0
"""
import numpy as np
import matplotlib.pyplot as plt
class WEFileIO(object):
"""Generic IO classe for file types classes."""
figure = None
def __init__(self, filename=None, file_type_name = "MyFileTypeName", file_extension="*"):
""" Initialized the classe using the filename
Parameters:
----------
filename : string (optional)
The file name to read and write
file_type_name : string (optional)
The name of the file type
file_extension : string (optional)
The file name extension of the file type, e.g. "txt"
"""
self.file_type_name = file_type_name
self.file_extension = file_extension
if filename:
### If there is a new filename, replace the object variable
self.filename = filename
### If the filename is provided, read the file
self.read()
def read(self, filename=None):
""" Read the file
Parameters:
----------
filename : string (optional)
The file name to read
Returns:
--------
data : string
the data read
"""
if filename:
### If there is a new filename, replace the object variable
self.filename = filename
if self.filename:
self._read()
#return self.data
else: # If self.filename == None, raise an exception
raise Exception('No filename has been provided')
def write(self, filename=None):
""" Write a file
Parameters:
----------
filename : string (optional)
The file name to write
"""
if filename:
# If there is a new filename, replace the instance variable
self.filename = filename
if self.filename:
self._write()
else:
# If self.filename == None, raise an exception
raise Exception('No filename has been provided')
def plot(self, *args, **kwargs):
if self.figure is None:
self.figure = plt.figure()
self._plot(self.figure, *args, **kwargs)
plt.show()
### Private methods to be implemented in the subclasses --------------------
def _read(self):
""" Read the file."""
### You are going to replace this code when you inherit from this class
raise NotImplementedError("This method must be implemented in subclass")
def _write(self):
""" Write a file"""
### You are going to replace this code when you inherit from this class
raise NotImplementedError("This method must be implemented in subclass")
def _plot(self, fig):
"""
Plot your data
:param matplotlib.figure: figure to plot on
"""
### You are going to replace this code when you inherit from this class
raise NotImplementedError("This method must be implemented in subclass")
## Do Some testing -------------------------------------------------------
import unittest
import os
class TestWEFileIO(unittest.TestCase):
""" Test class for FileType class """
def _duplicate(self, class_, filename):
original_filename = filename
new_filename = original_filename + '_new'
### Open a new file
original_file = class_(original_filename)
### write the file to a new filename
original_file.write(new_filename)
new_file = class_(new_filename)
return original_file, new_file
def _test_duplication(self, class_, filename):
""" Test if a file is written correctly by comparing with the data
of the original file
"""
original_file, new_file = self._duplicate(class_, filename)
### Unit test function to check if two things are equal
self.assertEqual(original_file.data, new_file.data)
os.remove(new_file.filename)
def _test_duplication_array(self, class_, filename):
""" Test if a file is written correctly by comparing with the data
of the original file
"""
original_file, new_file = self._duplicate(class_, filename)
### Unit test function to check if two things are equal
self.assertTrue(np.linalg.norm(original_file.data - new_file.data) < 1.0E-8)
os.remove(new_file.filename)
| apache-2.0 |
JaviMerino/trappy | trappy/plotter/ILinePlot.py | 1 | 9478 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the class for plotting and
customizing Line/Linear Plots with :mod:`trappy.trace.FTrace`
This plot only works when run from an IPython notebook
"""
import matplotlib.pyplot as plt
from trappy.plotter import AttrConf
from trappy.plotter import Utils
from trappy.plotter.Constraint import ConstraintManager
from trappy.plotter.ILinePlotGen import ILinePlotGen
from trappy.plotter.AbstractDataPlotter import AbstractDataPlotter
from trappy.plotter.ColorMap import ColorMap
from trappy.plotter import IPythonConf
from trappy.utils import handle_duplicate_index
import pandas as pd
if not IPythonConf.check_ipython():
raise ImportError("Ipython Environment not Found")
class ILinePlot(AbstractDataPlotter):
"""
This class uses :mod:`trappy.plotter.Constraint.Constraint` to
represent different permutations of input parameters. These
constraints are generated by creating an instance of
:mod:`trappy.plotter.Constraint.ConstraintManager`.
:param traces: The input data
:type traces: a list of :mod:`trappy.trace.FTrace` or :mod:`pandas.DataFrame` or a single instance of them
:param column: specifies the name of the column to
be plotted.
:type column: (str, list(str))
:param templates: TRAPpy events
.. note::
This is not required if a :mod:`pandas.DataFrame` is
used
:type templates: :mod:`trappy.base.Base`
:param filters: Filter the column to be plotted as per the
specified criteria. For Example:
::
filters =
{
"pid": [ 3338 ],
"cpu": [0, 2, 4],
}
:type filters: dict
:param per_line: Used to control the number of graphs
in each graph subplot row
:type per_line: int
:param concat: Draw all the pivots on a single graph
:type concat: bool
:param permute: Draw one plot for each of the traces specified
:type permute: bool
:param fill: Fill the area under the plots
:type fill: bool
:param drawstyle: Set the drawstyle to a matplotlib compatible
drawing style.
.. note::
Only "steps-post" is supported as a valid value for
the drawstyle. This creates a step plot.
:type drawstyle: str
:param sync_zoom: Synchronize the zoom of a group of plots.
Zooming in one plot of a group (see below) will zoom in every
plot of that group. Defaults to False.
:type sync_zoom: boolean
:param group: Name given to the plots created by this ILinePlot
instance. This name is only used for synchronized zoom. If
you zoom on any plot in a group all plots will zoom at the
same time.
:type group: string
:param signals: A string of the type event_name:column
to indicate the value that needs to be plotted
.. note::
- Only one of `signals` or both `templates` and
`columns` should be specified
- Signals format won't work for :mod:`pandas.DataFrame`
input
:type signals: str
"""
def __init__(self, traces, templates=None, **kwargs):
# Default keys, each can be overridden in kwargs
self._layout = None
super(ILinePlot, self).__init__(traces=traces,
templates=templates)
self.set_defaults()
for key in kwargs:
self._attr[key] = kwargs[key]
if "signals" in self._attr:
self._describe_signals()
self._check_data()
if "column" not in self._attr:
raise RuntimeError("Value Column not specified")
if self._attr["drawstyle"] and self._attr["drawstyle"].startswith("steps"):
self._attr["step_plot"] = True
zip_constraints = not self._attr["permute"]
self.c_mgr = ConstraintManager(traces, self._attr["column"], self.templates,
self._attr["pivot"],
self._attr["filters"], zip_constraints)
def savefig(self, *args, **kwargs):
raise NotImplementedError("Not Available for ILinePlot")
def view(self, test=False):
"""Displays the graph"""
# Defer installation of IPython components
        # to the .view call to avoid any errors
# when importing the module. This facilitates
# the importing of the module from outside
# an IPython notebook
if not test:
IPythonConf.iplot_install("ILinePlot")
if self._attr["concat"]:
self._plot_concat()
else:
self._plot(self._attr["permute"], test)
def set_defaults(self):
"""Sets the default attrs"""
self._attr["per_line"] = AttrConf.PER_LINE
self._attr["concat"] = AttrConf.CONCAT
self._attr["filters"] = {}
self._attr["pivot"] = AttrConf.PIVOT
self._attr["permute"] = False
self._attr["drawstyle"] = None
self._attr["step_plot"] = False
self._attr["fill"] = AttrConf.FILL
self._attr["draw_line"] = True
self._attr["scatter"] = AttrConf.PLOT_SCATTER
self._attr["point_size"] = AttrConf.POINT_SIZE
self._attr["map_label"] = {}
self._attr["title"] = AttrConf.TITLE
def _plot(self, permute, test):
"""Internal Method called to draw the plot"""
pivot_vals, len_pivots = self.c_mgr.generate_pivots(permute)
self._layout = ILinePlotGen(len_pivots, **self._attr)
plot_index = 0
for p_val in pivot_vals:
data_frame = pd.Series()
for constraint in self.c_mgr:
if permute:
trace_idx, pivot = p_val
if constraint.trace_index != trace_idx:
continue
legend = constraint._template.name + ":" + constraint.column
else:
pivot = p_val
legend = str(constraint)
result = constraint.result
if pivot in result:
data_frame[legend] = result[pivot]
if permute:
title = self.traces[plot_index].name
elif pivot != AttrConf.PIVOT_VAL:
title = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
else:
title = ""
# Fix data frame indexes if necessary
data_frame = self._fix_indexes(data_frame)
self._layout.add_plot(plot_index, data_frame, title, test=test)
plot_index += 1
self._layout.finish()
def _plot_concat(self):
"""Plot all lines on a single figure"""
pivot_vals, _ = self.c_mgr.generate_pivots()
plot_index = 0
self._layout = ILinePlotGen(len(self.c_mgr), **self._attr)
for constraint in self.c_mgr:
result = constraint.result
title = str(constraint)
data_frame = pd.Series()
for pivot in pivot_vals:
if pivot in result:
if pivot == AttrConf.PIVOT_VAL:
key = ",".join(self._attr["column"])
else:
key = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
data_frame[key] = result[pivot]
# Fix data frame indexes if necessary
data_frame = self._fix_indexes(data_frame)
self._layout.add_plot(plot_index, data_frame, title)
plot_index += 1
self._layout.finish()
def _fix_indexes(self, data_frame):
"""
In case of multiple traces with different indexes (i.e. x-axis values),
create new ones with same indexes
"""
# 1) Check if we are processing multiple traces
if len(data_frame) > 1:
# 2) Merge the data frames to obtain common indexes
df_columns = list(data_frame.keys())
dedup_data = [handle_duplicate_index(s) for s in data_frame.values]
data_frame = pd.Series(dedup_data, index=df_columns)
merged_df = pd.concat(data_frame.get_values(), axis=1)
merged_df.columns = df_columns
# 3) Fill NaN values depending on drawstyle
if self._attr["drawstyle"] == "steps-post":
merged_df = merged_df.ffill()
elif self._attr["drawstyle"] == "steps-pre":
merged_df = merged_df.bfill()
elif self._attr["drawstyle"] == "steps-mid":
merged_df = merged_df.ffill()
else:
# default
merged_df = merged_df.interpolate()
return merged_df
else:
return data_frame
| apache-2.0 |
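A hedged usage sketch for the class above: it assumes an IPython notebook, a trace file named trace.dat, and a trace that contains a `cpu_frequency` event; the signal, pivot, and filter names are illustrative rather than prescribed.

```python
import trappy

trace = trappy.FTrace("trace.dat")

line_plot = trappy.ILinePlot(
    trace,
    signals=["cpu_frequency:frequency"],   # event:column, as described above
    pivot="cpu",                           # one line per CPU
    filters={"cpu": [0, 1]},               # restrict to two CPUs
    drawstyle="steps-post",                # step plot, per the docstring
)
line_plot.view()
```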
chrisjdavie/shares | stock_trackers/open_previous_FTSE_data.py | 1 | 1450 | '''
Created on 4 Aug 2014
@author: chris
'''
def main():
from os_fns import open_file_list
dir_ftse = '/home/chris/Work/projects/shares/docs/stock_market_prices/FTSE_100/'
flist = open_file_list(dir_ftse)
import pickle
import dateutil.parser
dates = []
prices = []
for fname in flist:
print fname
f = open(fname,'r')
data = pickle.load( f )
print list(data)
f.close()
# print data['LastTradeDate']
date = data['LastTradeDate'].split('/')[::-1]
date[1] = str(int(date[1])+100)[1:]
date[2] = str(int(date[2])+100)[1:]
date = date[0] + '-' + date[2] + '-' + date[1]
dates.append(dateutil.parser.parse(date))
prices.append(float(data['LastTradePriceOnly']))
print dateutil.parser.parse(date), float(data['LastTradePriceOnly'])
from stock_plots import date_v_price
p = date_v_price(dates,prices)
p.show()
# import matplotlib.pyplot as pl
# import matplotlib.dates as mdates
# yearsFmt = mdates.DateFormatter('%Y-%m-%d')
# days = mdates.DayLocator()
#
# fig, ax = pl.subplots()
# ax.plot(dates,prices)
#
# ax.xaxis.set_major_locator(days)
# ax.xaxis.set_major_formatter(yearsFmt)
# # ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
# fig.autofmt_xdate()
# pl.show()
if __name__ == '__main__':
main() | mit |
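The date handling above rebuilds the trade date by reversing the '/'-separated fields and using the +100 trick to zero-pad them. A standard-library equivalent is sketched below, assuming the M/D/YYYY format the code implies; the sample value is made up.

```python
from datetime import datetime

last_trade_date = "8/4/2014"                     # e.g. data['LastTradeDate']
parsed = datetime.strptime(last_trade_date, "%m/%d/%Y")
print(parsed.strftime("%Y-%m-%d"))               # -> 2014-08-04
```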
Jwely/cdo-api-py | cdo_api_py/util.py | 1 | 3478 | from typing import List
def _make_geojson_point_feature(properties: dict = None, coordinates: list = None):
"""
Creates a geojson format feature.
:param properties: dictionary with the properties for the feature.
:param coordinates: The point features coordinates in [ lon, lat ] format.
:return:
"""
# fill empty properties with blank id property.
if properties is None:
properties = {'id': None}
    if not isinstance(coordinates, list):
        raise TypeError("coordinates must be a list of [lon, lat]")
    if len(coordinates) != 2:
        raise ValueError("coordinates must contain exactly two values: [lon, lat]")
point_feature = {
"type": "Feature",
"properties": properties, # Attributes go here!
"geometry": {
"type": "Point",
"coordinates": coordinates # Coordinates go here! contains [lon, lat] pair
}
}
return point_feature
def _make_geojson_layer(features: List[dict], name: str):
"""
:param features: List of geojson compliant dictionaries.
:param name: The name attribute for the layer.
:return:
"""
# A geojson layer form looks like this.
feature_layer = {
"type": "FeatureCollection",
"name": name,
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:OGC:1.3:CRS84"
},
},
"features": features
}
return feature_layer
def rows_to_point_geojson(
rows: List[dict],
name: str,
lat_key: str = "latitude",
lon_key: str = "longitude") -> dict:
"""
A tabular dataset with lat-lon pairs for each row can be converted into a feature class
point layer. A dataframe to dictionaries with a "records" orientation, or otherwise
created list of dictionaries for each feature of the feature layer is suitable. Function
intended for use in converting table of weather stations to a GIS format.
Example use:
``` python
from pathlib import Path
import pandas as pd
import geojson
# read a previously saved csv of weather stations
df = pd.read_csv('ghcnd_stations.csv', index_col=0)
# convert to geojson dictionary
station_geojson = rows_to_point_geojson(
df.to_dict(orient='records'),
name="ghcnd_stations"
)
# serialize it to a geojson file.
with Path("ghcnd_stations.geojson").open('w+') as f:
f.write(geojson.dumps(station_geojson, indent=4))
```
:param rows: List of dictionaries, every dictionary must have an entry for lat_key and lon_key.
:param name: name to give the feature class layer
:param lat_key: string key used to find latitude values in rows dictionaries
:param lon_key: string key used to find longitude values in row dictionaries.
:return: a dictionary serializable to geojson with geojson.dumps()
"""
features = list()
for row in rows:
if lon_key not in row.keys():
raise KeyError(f"{lon_key}, not found in row [{row}]")
if lat_key not in row.keys():
raise KeyError(f"{lat_key}, not found in row [{row}]")
row_coords = [row[lon_key], row[lat_key]]
row_feature = _make_geojson_point_feature(properties=row, coordinates=row_coords)
features.append(row_feature)
feature_layer = _make_geojson_layer(features, name=name)
return feature_layer | mit |
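A short end-to-end sketch of the helper above with two made-up station rows; the layer is serialized with the standard json module instead of the geojson package mentioned in the docstring.

```python
import json

from cdo_api_py.util import rows_to_point_geojson   # module shown above

stations = [
    {"id": "STATION_A", "latitude": 38.85, "longitude": -77.03},
    {"id": "STATION_B", "latitude": 39.17, "longitude": -76.68},
]

layer = rows_to_point_geojson(stations, name="example_stations")
print(json.dumps(layer, indent=2))
```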
satishgoda/bokeh | bokeh/charts/tests/test_data_adapter.py | 37 | 3285 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect data adapter index to be the same as the underlying pandas
# object and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
| bsd-3-clause |
pm2111/Heart-Defibrillation-Project | last few python scripts/average_AF_duration_many_systems.py | 1 | 3242 | import matplotlib.pylab as plt
import numpy as np
import os
#os.chdir("/Users/petermarinov/msci project/intermittent /50 hearts 10^5 nr excited 10^3 restitution 0.333 ")
path = []
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis 0.5 corr 0.2/50 hearts 0.333 restitution")
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis 0.5 corr 0.5/50 hearts 0.333 restitution")
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis -0.5 corr 0.99/50 hearts 0.333 restitution")
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis 0.2 corr 0.99/50 hearts 0.333 restitution")
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis 0.5 corr 0.99/50 hearts 0.333 restitution")
path.append( "/Users/petermarinov/msci project/all code/50 hearts preferential/50 hearts anis 0.99 corr 0.99/50 hearts 0.333 restitution")
previous_fib = np.genfromtxt("/Users/petermarinov/msci project/all code/intermittent /paroxysmal.txt") #1 for christensen, 2 for 1d corr
lab = ["anis = 0.5 corr = 0.2","anis = 0.5 corr = 0.5","anis = -0.5 corr = 0.99","anis = 0.2 corr = 0.99","anis = 0.5 corr = 0.99","anis = 0.99 corr = 0.99"]
horizontal = 100
af = np.zeros((6,8))
number_events = np.zeros((6,8))
L = 200 #system size
z = 0
for y in path:
filenames = []
for f in os.listdir(y):
if f.endswith(".txt"):
filenames.append('/' +f )
j=0
for x in filenames:
data = np.genfromtxt(y + x)
size = np.shape(data)
counter=False
for i in range (0,size[0]):
if (data[i]>1.5*L)==True:
af[z][j] +=1
counter = True
if (data[i] >1.5*L and data[i-1] <1.5*L):
number_events[z][j] +=1
j+=1
if j==8: #iterate through 8 nus
j=0
z+=1
avg_event_dur = np.divide(af,number_events)*100 #1000 timesteps inbetween each recording
nus = []
nus.append(np.linspace(0.10,0.38,8))
nus.append(np.linspace(0.20,0.48,8))
nus.append(np.linspace(0.40,0.68,8))
nus.append(np.linspace(0.40,0.68,8))
nus.append(np.linspace(0.40,0.68,8))
nus.append(np.linspace(0.60,0.88,8))
previous_fib = np.genfromtxt("/Users/petermarinov/msci project/all code/intermittent /paroxysmal.txt")
preferential = [196,214,250,564,734,149,31,0,0,0,0,0]
nu_pref = np.linspace(0.15,0.7,12)
plt.figure()
for i in range (0,6):
plt.plot(nus[i],avg_event_dur[i],"-o", label = lab[i], linewidth =2)
#plt.plot(previous_fib[:,0],previous_fib[:,1], label = "Christensen et al.'s Original Model")
#plt.plot(nu_pref,preferential, label = "preferential attachment structure F=10", linewidth = 2 )
#plt.plot(previous_fib[:,0],previous_fib[:,1], label = "Christensen et al.'s orignal model", linewidth = 2 )
#plt.plot(previous_fib[:,2],previous_fib[:,3], label = "Moderate APD", linewidth = 2 )
plt.xlabel(r"$\nu$", fontsize=18)
plt.ylabel("Average Event Duration [simulation timesteps]", fontsize=18)
#plt.title( "Intermittent AF comparison between models for 50 randomly seeded hearts" ,fontsize=12)
plt.legend()
plt.grid()
plt.show()
| mit |
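The inner loop above measures AF burden by counting samples whose excitation exceeds 1.5*L and treating each upward crossing of that threshold as a new event; the average event duration is then the ratio of the two counts. A toy, vectorised version of the same bookkeeping, with made-up numbers:

```python
import numpy as np

L = 200
signal = np.array([100, 350, 400, 120, 500, 90])   # made-up excitation trace
above = signal > 1.5 * L

events = np.sum(above[1:] & ~above[:-1]) + int(above[0])   # upward crossings
samples_in_af = np.sum(above)                              # time spent above threshold

print(events, samples_in_af, samples_in_af / events)       # 2 events, 3 samples, 1.5 average
```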
maheshakya/scikit-learn | sklearn/tests/test_hmm.py | 31 | 28118 | from __future__ import print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# use init_function to initialize paramerters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
| bsd-3-clause |
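MultinomialHMMTestCase above relies on the Wikipedia weather example, where the observations [walk, shop, clean] decode to [Sunny, Rainy, Rainy] with probability 0.01344. That expected answer can be confirmed independently of the hmm module by brute force over all state sequences:

```python
from itertools import product

startprob = [0.6, 0.4]                              # Rainy, Sunny
transmat = [[0.7, 0.3], [0.4, 0.6]]
emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]   # walk, shop, clean
observations = [0, 1, 2]

best_prob, best_seq = 0.0, None
for seq in product(range(2), repeat=len(observations)):
    prob = startprob[seq[0]] * emissionprob[seq[0]][observations[0]]
    for t in range(1, len(observations)):
        prob *= transmat[seq[t - 1]][seq[t]] * emissionprob[seq[t]][observations[t]]
    if prob > best_prob:
        best_prob, best_seq = prob, seq

print(best_seq, best_prob)   # (1, 0, 0) i.e. Sunny, Rainy, Rainy with p = 0.01344
```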
pv/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n on non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
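A small follow-on sketch, not part of the gallery example above: besides the fitted curve, RANSACRegressor exposes which samples it treated as inliers, which is often the more useful diagnostic when the targets are corrupted.

```python
import numpy as np
from sklearn.linear_model import RANSACRegressor

rng = np.random.RandomState(42)
X = rng.normal(size=(400, 1))
y = np.sin(X).ravel()
y[::3] = 10                                   # corrupt every third target

ransac = RANSACRegressor(random_state=42).fit(X, y)
print("fraction of samples kept as inliers: %.2f" % ransac.inlier_mask_.mean())
```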
JeanKossaifi/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
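    # (Editorial note, not from the original test.) A worked instance of the
    # Laplace-smoothed Bernoulli estimate checked here: with alpha=1,
    # P(term | class) = (docs in class containing term + 1) / (docs in class + 2),
    # e.g. P(Beijing | China) = (1 + 1) / (3 + 2) = 0.4, the first entry below.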
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
zycdragonball/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 8 | 36338 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import numpy as np
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
logging.warning(
'Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
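# A minimal sketch (added for illustration, not part of the original module) of
# how the hooks above are typically used: subclass `Callback` and override only
# the methods you need. The class name below is hypothetical.
class _ExampleEpochLossPrinter(Callback):
  """Prints the loss reported in `logs` at the end of every epoch."""
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    print('Epoch %d ended with loss: %s' % (epoch, logs.get('loss')))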
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self):
super(TerminateOnNaN, self).__init__()
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
          count samples seen or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target, verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
  which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.' % (self.monitor))
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
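# Illustrative sketch (not part of the original module): how the `filepath`
# templating documented above is typically wired into training. The model and
# data names are hypothetical placeholders.
def _example_model_checkpoint_usage(model, x_train, y_train):
  """Saves the best model (by validation loss) with epoch/metric in the name."""
  checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                               monitor='val_loss', save_best_only=True,
                               verbose=1)
  return model.fit(x_train, y_train, validation_split=0.1,
                   callbacks=[checkpoint])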
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.' % (mode))
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
      logging.warning('Early stopping requires %s available!' % (self.monitor))
      return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
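# Illustrative sketch (not part of the original module): stopping once the
# validation loss fails to improve by at least `min_delta` for `patience`
# epochs. The model and data names are hypothetical placeholders.
def _example_early_stopping_usage(model, x_train, y_train):
  """Trains with early stopping on the validation loss."""
  stopper = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=3,
                          verbose=1)
  return model.fit(x_train, y_train, validation_split=0.1, epochs=100,
                   callbacks=[stopper])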
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json', 'Content-Type': 'application/json'}`
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires ' 'the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
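# Illustrative sketch (not part of the original module): pointing the monitor
# at a hypothetical local endpoint; each epoch's `logs` dict is POSTed as JSON
# to `root + path`, as documented above.
def _example_remote_monitor_usage(model, x_train, y_train):
  """Streams epoch-end metrics to a (hypothetical) local HTTP server."""
  monitor = RemoteMonitor(root='http://localhost:9000',
                          path='/publish/epoch/end/')
  return model.fit(x_train, y_train, callbacks=[monitor])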
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
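# Illustrative sketch (not part of the original module): a step-decay schedule
# of the kind the class above expects -- a function of the epoch index that
# returns a Python float. The decay constants are arbitrary placeholders.
def _example_step_decay_schedule(epoch):
  """Halves a base learning rate of 0.01 every 10 epochs."""
  return 0.01 * (0.5 ** (epoch // 10))
# Typical wiring: pass LearningRateScheduler(_example_step_decay_schedule) in
# the `callbacks` list of `model.fit`.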
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
TensorBoard is a visualization tool provided with TensorFlow.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
and weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network
for histograms computation.
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
      embeddings_layer_names: a list of names of layers to keep an eye on. If
          None or an empty list, all embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
          about the metadata file format. If the same metadata file is
          used for all embedding layers, a single string can be passed.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
self.batch_size = batch_size
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_grads:
grads = model.optimizer.get_gradients(model.total_loss, weight)
tf_summary.histogram('{}_grad'.format(weight.name), grads)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
if self.embeddings_freq:
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
embeddings = {
layer.name: layer.weights[0]
for layer in self.model.layers if layer.name in embeddings_layer_names
}
self.saver = saver_lib.Saver(list(embeddings.values()))
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()
}
config = projector.ProjectorConfig()
self.embeddings_ckpt_path = os.path.join(self.log_dir,
'keras_embedding.ckpt')
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
val_data = self.validation_data
tensors = (
self.model.inputs + self.model.targets + self.model.sample_weights)
if self.model.uses_learning_phase:
tensors += [K.learning_phase()]
assert len(val_data) == len(tensors)
val_size = val_data[0].shape[0]
i = 0
while i < val_size:
step = min(self.batch_size, val_size - i)
batch_val = []
batch_val.append(val_data[0][i:i + step])
batch_val.append(val_data[1][i:i + step])
batch_val.append(val_data[2][i:i + step])
if self.model.uses_learning_phase:
batch_val.append(val_data[3])
feed_dict = dict(zip(tensors, batch_val))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
i += self.batch_size
if self.embeddings_freq and self.embeddings_ckpt_path:
if epoch % self.embeddings_freq == 0:
self.saver.save(self.sess, self.embeddings_ckpt_path, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
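# Illustrative sketch (not part of the original module): enabling histogram
# logging requires validation data, as noted in the docstring above. The log
# directory and model/data names are hypothetical placeholders.
def _example_tensorboard_usage(model, x_train, y_train):
  """Writes scalars, the graph and per-epoch weight histograms to ./logs."""
  tb = TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True)
  return model.fit(x_train, y_train, validation_split=0.1, callbacks=[tb])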
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode))
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Learning Rate Plateau Reducing requires %s available!' %
self.monitor)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
          training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.file_flags = 'b' if six.PY2 and os.name == 'nt' else ''
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a' + self.file_flags)
else:
self.csv_file = open(self.filename, 'w' + self.file_flags)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expects positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| apache-2.0 |
ahmadia/bokeh | examples/compat/mpl/subplots.py | 13 | 1798 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
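# All four Anscombe datasets share (to two decimals) the same least-squares
# line, y = 3.00 + 0.500 * x, which is why this single `fit` is reused below.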
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
show(mpl.to_bokeh(name="subplots"))
| bsd-3-clause |
osmanbaskaya/text-classification-with-convnets | evaluate.py | 2 | 1455 | """ Package contains performance evaluation methods.
"""
import sys
import data
from model import create_logistic_model, create_regression_model
from utils import cross_validate
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import accuracy_score
fn = sys.argv[1]
n_folds = int(sys.argv[2])
problem_type = sys.argv[3]
use_pretrained_embeddings = True if sys.argv[4].lower() == 'true' else False
print >> sys.stderr, fn, n_folds, problem_type, use_pretrained_embeddings
assert problem_type in ('regression', 'classification'), "Problem type should be either regression or classification"
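# Example invocation (the data file name is a hypothetical placeholder; the
# arguments are: input data file, number of CV folds, problem type, and
# whether to use pretrained embeddings):
#   python evaluate.py reviews.tsv 5 classification true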
maxlen = 20 # maximum length for each sentence.
max_features = 25000 # length of the vocabulary.
batch_size = 32
nb_epoch = 3
additional_num_words = 2 # "UNK" and "PADDING"
(X_train, y_train), (_, _), word_idx = data.read(fn, 0.0, maxlen, max_features, problem_type)
print >> sys.stderr, 'X_train shape:', X_train.shape
max_features = min(max_features, len(word_idx) + additional_num_words)
if problem_type == 'regression':
model = create_regression_model(maxlen, max_features, word_idx, use_pretrained_embeddings)
cross_validate(model, X_train, y_train, n_folds, batch_size, nb_epoch, func_for_evaluation=spearmanr)
else:
model = create_logistic_model(maxlen, max_features, word_idx, use_pretrained_embeddings)
cross_validate(model, X_train, y_train, n_folds, batch_size, nb_epoch, func_for_evaluation=accuracy_score)
| mit |
SamStudio8/scikit-bio | skbio/diversity/beta/tests/test_unifrac.py | 6 | 30062 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import pandas as pd
from skbio import TreeNode, DistanceMatrix
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
class TestUniFrac(TestCase):
def setUp(self):
self.table1 = np.array(
[[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1],
[5, 3, 5, 0, 0],
[0, 0, 0, 3, 5]])
self.sids1 = list('ABCDEF')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.t1 = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
self.t1_w_extra_tips = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
u')root;'))
self.t2 = TreeNode.read(
StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
u'root;'))
self.oids2 = ['OTU%d' % i for i in range(1, 5)]
# the following table and tree are derived from the QIIME 1.9.1
# "tiny-test" data
tt_table_fp = get_data_path(
os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
tt_tree_fp = get_data_path(
os.path.join('qiime-191-tt', 'tree.nwk'), 'data')
self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
index_col=0)
self.q_tree = TreeNode.read(tt_tree_fp)
def test_unweighted_unifrac_qiime_tiny_test(self):
dm_fp = get_data_path(
os.path.join('qiime-191-tt', 'unweighted_unifrac_dm.txt'), 'data')
expected = DistanceMatrix.read(dm_fp)
for sid1 in self.q_table.columns:
for sid2 in self.q_table.columns:
actual = unweighted_unifrac(
self.q_table[sid1], self.q_table[sid2],
otu_ids=self.q_table.index, tree=self.q_tree)
self.assertAlmostEqual(actual, expected[sid1, sid2])
def test_weighted_unifrac_qiime_tiny_test(self):
dm_fp = get_data_path(
os.path.join('qiime-191-tt', 'weighted_unifrac_dm.txt'), 'data')
expected = DistanceMatrix.read(dm_fp)
for sid1 in self.q_table.columns:
for sid2 in self.q_table.columns:
actual = weighted_unifrac(
self.q_table[sid1], self.q_table[sid2],
otu_ids=self.q_table.index, tree=self.q_tree)
self.assertAlmostEqual(actual, expected[sid1, sid2],
msg="%s, %s" % (sid1, sid2))
def test_weighted_normalized_unifrac_qiime_tiny_test(self):
dm_fp = get_data_path(
os.path.join('qiime-191-tt', 'weighted_normalized_unifrac_dm.txt'),
'data')
expected = DistanceMatrix.read(dm_fp)
for sid1 in self.q_table.columns:
for sid2 in self.q_table.columns:
actual = weighted_unifrac(
self.q_table[sid1], self.q_table[sid2],
otu_ids=self.q_table.index, tree=self.q_tree,
normalized=True)
self.assertAlmostEqual(actual, expected[sid1, sid2])
def test_unweighted_extra_tips(self):
# UniFrac values are the same despite unobserved tips in the tree
for i in range(len(self.table1)):
for j in range(len(self.table1)):
actual = unweighted_unifrac(
self.table1[i], self.table1[j], self.oids1,
self.t1_w_extra_tips)
expected = unweighted_unifrac(
self.table1[i], self.table1[j], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_weighted_extra_tips(self):
# UniFrac values are the same despite unobserved tips in the tree
for i in range(len(self.table1)):
for j in range(len(self.table1)):
actual = weighted_unifrac(
self.table1[i], self.table1[j], self.oids1,
self.t1_w_extra_tips)
expected = weighted_unifrac(
self.table1[i], self.table1[j], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_unweighted_minimal_trees(self):
# expected values computed by hand
# zero tips
tree = TreeNode.read(StringIO(u'root;'))
actual = unweighted_unifrac([], [], [], tree)
expected = 0.0
self.assertEqual(actual, expected)
# two tips
tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
actual = unweighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree)
expected = 1.0
self.assertEqual(actual, expected)
def test_weighted_minimal_trees(self):
# expected values computed by hand
# zero tips
tree = TreeNode.read(StringIO(u'root;'))
actual = weighted_unifrac([], [], [], tree)
expected = 0.0
self.assertEqual(actual, expected)
# two tips
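        # (editorial note) as computed by hand here, weighted UniFrac sums
        # branch_length * |u_i/U - v_i/V| over branches; the empty second
        # community contributes no abundance, so only OTU1's 0.25 branch
        # counts: 0.25 * |1 - 0| = 0.25.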
tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
actual = weighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree)
expected = 0.25
self.assertEqual(actual, expected)
def test_unweighted_root_not_observed(self):
# expected values computed with QIIME 1.9.1 and by hand
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = unweighted_unifrac([1, 1, 0, 0], [1, 0, 0, 0],
self.oids2, self.t2)
        # for clarity of what I'm testing, compute expected as it would be
        # based on the branch lengths. the values that compose shared were
# a point of confusion for me here, so leaving these in for
# future reference
expected = 0.2 / (0.1 + 0.2 + 0.3) # 0.3333333333
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = unweighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2)
        # for clarity of what I'm testing, compute expected as it would be
        # based on the branch lengths. the values that compose shared were
# a point of confusion for me here, so leaving these in for
# future reference
expected = 0.7 / (1.1 + 0.5 + 0.7) # 0.3043478261
self.assertAlmostEqual(actual, expected)
def test_weighted_root_not_observed(self):
# expected values computed by hand, these disagree with QIIME 1.9.1
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
self.oids2, self.t2)
expected = 0.15
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2)
expected = 0.6
self.assertAlmostEqual(actual, expected)
def test_weighted_normalized_root_not_observed(self):
# expected values computed by hand, these disagree with QIIME 1.9.1
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered shared
actual = weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0],
self.oids2, self.t2, normalized=True)
expected = 0.1764705882
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered shared
actual = weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0],
self.oids2, self.t2, normalized=True)
expected = 0.1818181818
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_kwargs(self):
# confirm that **kwargs can be passed
actual = unweighted_unifrac(self.table1[0], self.table1[0], self.oids1,
self.t1, not_a_known_parameter=42)
self.assertAlmostEqual(actual, 0.0)
def test_weighted_unifrac_kwargs(self):
# confirm that **kwargs can be passed
actual = weighted_unifrac(self.table1[0], self.table1[0], self.oids1,
self.t1, not_a_known_parameter=42)
self.assertAlmostEqual(actual, 0.0)
def test_unweighted_unifrac_identity(self):
for i in range(len(self.table1)):
actual = unweighted_unifrac(
self.table1[i], self.table1[i], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_symmetry(self):
for i in range(len(self.table1)):
for j in range(len(self.table1)):
actual = unweighted_unifrac(
self.table1[i], self.table1[j], self.oids1, self.t1)
expected = unweighted_unifrac(
self.table1[j], self.table1[i], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_invalid_input(self):
# Many of these tests are duplicated from
# skbio.diversity.tests.test_base, but I think it's important to
# confirm that they are being run when *unifrac is called.
# tree has duplicated tip ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU2:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(DuplicateNodeError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# unrooted tree as input
t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
u'OTU4:0.7);'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
# negative counts
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, -3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
u_counts = [1, 2, 3]
v_counts = [1, 1, -1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, unweighted_unifrac, u_counts, v_counts,
otu_ids, t)
self.assertRaises(ValueError, weighted_unifrac, u_counts, v_counts,
otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
u_counts = [1, 2, 3]
v_counts = [1, 1, 1]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, unweighted_unifrac, u_counts,
v_counts, otu_ids, t)
self.assertRaises(MissingNodeError, weighted_unifrac, u_counts,
v_counts, otu_ids, t)
def test_unweighted_unifrac_non_overlapping(self):
# these communities only share the root node
actual = unweighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac_zero_counts(self):
actual = unweighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
[], [], [], self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_unweighted_unifrac(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation than skbio's initial
# unweighted unifrac implementation
# sample A versus all
actual = unweighted_unifrac(
self.table1[0], self.table1[1], self.oids1, self.t1)
expected = 0.238095238095
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[0], self.table1[2], self.oids1, self.t1)
expected = 0.52
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[0], self.table1[3], self.oids1, self.t1)
expected = 0.52
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[0], self.table1[4], self.oids1, self.t1)
expected = 0.545454545455
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[0], self.table1[5], self.oids1, self.t1)
expected = 0.619047619048
self.assertAlmostEqual(actual, expected)
# sample B versus remaining
actual = unweighted_unifrac(
self.table1[1], self.table1[2], self.oids1, self.t1)
expected = 0.347826086957
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[1], self.table1[3], self.oids1, self.t1)
expected = 0.347826086957
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[1], self.table1[4], self.oids1, self.t1)
expected = 0.68
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[1], self.table1[5], self.oids1, self.t1)
expected = 0.421052631579
self.assertAlmostEqual(actual, expected)
# sample C versus remaining
actual = unweighted_unifrac(
self.table1[2], self.table1[3], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[2], self.table1[4], self.oids1, self.t1)
expected = 0.68
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[2], self.table1[5], self.oids1, self.t1)
expected = 0.421052631579
self.assertAlmostEqual(actual, expected)
# sample D versus remaining
actual = unweighted_unifrac(
self.table1[3], self.table1[4], self.oids1, self.t1)
expected = 0.68
self.assertAlmostEqual(actual, expected)
actual = unweighted_unifrac(
self.table1[3], self.table1[5], self.oids1, self.t1)
expected = 0.421052631579
self.assertAlmostEqual(actual, expected)
# sample E versus remaining
actual = unweighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1)
expected = 1.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_identity(self):
for i in range(len(self.table1)):
actual = weighted_unifrac(
self.table1[i], self.table1[i], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_symmetry(self):
for i in range(len(self.table1)):
for j in range(len(self.table1)):
actual = weighted_unifrac(
self.table1[i], self.table1[j], self.oids1, self.t1)
expected = weighted_unifrac(
self.table1[j], self.table1[i], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_non_overlapping(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation than skbio's initial
# weighted unifrac implementation
# these communities only share the root node
actual = weighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1)
expected = 4.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_zero_counts(self):
actual = weighted_unifrac(
[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
# calculated the following by hand, as QIIME 1.9.1 tells the user
# that values involving empty vectors will be uninformative, and
# returns 1.0
actual = weighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 2.0
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
[], [], [], self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation than skbio's initial
# weighted unifrac implementation
actual = weighted_unifrac(
self.table1[0], self.table1[1], self.oids1, self.t1)
expected = 2.4
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[2], self.oids1, self.t1)
expected = 1.86666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[3], self.oids1, self.t1)
expected = 2.53333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[4], self.oids1, self.t1)
expected = 1.35384615385
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[5], self.oids1, self.t1)
expected = 3.2
self.assertAlmostEqual(actual, expected)
# sample B versus remaining
actual = weighted_unifrac(
self.table1[1], self.table1[2], self.oids1, self.t1)
expected = 2.26666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[3], self.oids1, self.t1)
expected = 0.933333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[4], self.oids1, self.t1)
expected = 3.2
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[5], self.oids1, self.t1)
expected = 0.8375
self.assertAlmostEqual(actual, expected)
# sample C versus remaining
actual = weighted_unifrac(
self.table1[2], self.table1[3], self.oids1, self.t1)
expected = 1.33333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[2], self.table1[4], self.oids1, self.t1)
expected = 1.89743589744
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[2], self.table1[5], self.oids1, self.t1)
expected = 2.66666666667
self.assertAlmostEqual(actual, expected)
# sample D versus remaining
actual = weighted_unifrac(
self.table1[3], self.table1[4], self.oids1, self.t1)
expected = 2.66666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[3], self.table1[5], self.oids1, self.t1)
expected = 1.33333333333
self.assertAlmostEqual(actual, expected)
# sample E versus remaining
actual = weighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1)
expected = 4.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_identity_normalized(self):
for i in range(len(self.table1)):
actual = weighted_unifrac(
self.table1[i], self.table1[i], self.oids1, self.t1,
normalized=True)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_symmetry_normalized(self):
for i in range(len(self.table1)):
for j in range(len(self.table1)):
actual = weighted_unifrac(
self.table1[i], self.table1[j], self.oids1, self.t1,
normalized=True)
expected = weighted_unifrac(
self.table1[j], self.table1[i], self.oids1, self.t1,
normalized=True)
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_non_overlapping_normalized(self):
# these communities only share the root node
actual = weighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1,
normalized=True)
expected = 1.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_zero_counts_normalized(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation than skbio's initial
# weighted unifrac implementation
actual = weighted_unifrac(
[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1,
normalized=True)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
[1, 1, 1, 0, 0], [0, 0, 0, 0, 0], self.oids1, self.t1,
normalized=True)
expected = 1.0
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
[], [], [], self.t1, normalized=True)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_weighted_unifrac_normalized(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation than skbio's initial
# weighted unifrac implementation
actual = weighted_unifrac(
self.table1[0], self.table1[1], self.oids1, self.t1,
normalized=True)
expected = 0.6
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[2], self.oids1, self.t1,
normalized=True)
expected = 0.466666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[3], self.oids1, self.t1,
normalized=True)
expected = 0.633333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[4], self.oids1, self.t1,
normalized=True)
expected = 0.338461538462
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[0], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 0.8
self.assertAlmostEqual(actual, expected)
# sample B versus remaining
actual = weighted_unifrac(
self.table1[1], self.table1[2], self.oids1, self.t1,
normalized=True)
expected = 0.566666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[3], self.oids1, self.t1,
normalized=True)
expected = 0.233333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[4], self.oids1, self.t1,
normalized=True)
expected = 0.8
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[1], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 0.209375
self.assertAlmostEqual(actual, expected)
# sample C versus remaining
actual = weighted_unifrac(
self.table1[2], self.table1[3], self.oids1, self.t1,
normalized=True)
expected = 0.333333333333
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[2], self.table1[4], self.oids1, self.t1,
normalized=True)
expected = 0.474358974359
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[2], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 0.666666666667
self.assertAlmostEqual(actual, expected)
# sample D versus remaining
actual = weighted_unifrac(
self.table1[3], self.table1[4], self.oids1, self.t1,
normalized=True)
expected = 0.666666666667
self.assertAlmostEqual(actual, expected)
actual = weighted_unifrac(
self.table1[3], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 0.333333333333
self.assertAlmostEqual(actual, expected)
# sample E versus remaining
actual = weighted_unifrac(
self.table1[4], self.table1[5], self.oids1, self.t1,
normalized=True)
expected = 1.0
self.assertAlmostEqual(actual, expected)
if __name__ == "__main__":
main()
| bsd-3-clause |
decvalts/landlab | landlab/components/glacier_thin_ice_model/examples/glacier_example.py | 1 | 3113 |
import numpy as np
import scipy.io as io
from landlab.components.glacier_thin_ice_model.glacier import Glacier
from landlab import RasterModelGrid
import matplotlib.pyplot as plt
import matplotlib as mpl
def main():
'''
B: bed elevation
    b_dot: mass balance rate
dx: node spacing (dx = dy)
nx: number of columns of nodes
ny: number of rows of nodes
t_STOP: number of years of simulation
dt: time step interval, in years
t: starting time of simulation, default, 0
'''
input_file = 'mb4_spin1.mat'
mat = io.loadmat(input_file)
B = mat['B']
b_dot = mat['b_dot']
dx = mat['dx'][0,0]
dy = mat['dy'][0,0]
nx = np.int_(mat['nx'][0,0])
ny = np.int_(mat['ny'][0,0])
t_STOP = 500 ### 1000
dt = 0.08333
t = 0
### put input data in a dictionary, and pass the dictionary as arguments
B,b_dot,S = flatten(B,b_dot)
dictionary = {'S':S,'B':B,'b_dot':b_dot,'dt':dt,'t_STOP':t_STOP,'t':t,'dx':dx,'nx':nx,'ny':ny}
grid = RasterModelGrid(nx,ny,dx)
gla = Glacier(grid,dictionary)
gla.recursive_steps()
    ### save outputs in ASCII files
S_map = gla.grid['node']['ice_elevation'] ### ice surface elevation matrix
H_map = gla.grid['node']['ice_thickness'] ### ice thickness matrix
I_map = gla.grid['node']['I_map'] ### ice mask matrix
np.savetxt('S_map.txt',S_map)
np.savetxt('H_map.txt',H_map)
np.savetxt('I_map.txt',I_map)
### plot S_map
plt.figure(figsize=(8,6))
plt.imshow(S_map)
plt.colorbar()
plt.savefig('S_map_{0}yrs.pdf'.format(t_STOP),dpi=300)
### plot H_map
plt.figure(figsize=(8,6))
plt.imshow(H_map)
plt.colorbar()
plt.savefig('H_map_{0}yrs.pdf'.format(t_STOP),dpi=300)
### plot map of observed and simulated masks of ice
plot_mask('I_map.txt','obs_map.txt')
def flatten(B,b_dot):
### flatten two dimensional matrix
B = B.T.flatten()
B[np.isnan(B)] = 0
S = B
b_dot = b_dot.T.flatten()
return B,b_dot,S
def plot_mask(ifile_sim,ifile_obs):
'''
plot simulated and observed masks of ice
'''
# make presence of ice from simulated ice file as 1
# make presence of ice from observed ice file as 2
# make presence of ice in overlapping area as 3
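    # (Added example) a cell with simulated ice only sums to 1, with observed ice
    # only to 2, and with both to 1 + 2 = 3, matching the colorbar labels below.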
dat_sim = np.genfromtxt(ifile_sim)
dat_obs = np.genfromtxt(ifile_obs)
dat_obs[np.where(dat_obs==1)] = 2
dat_add = dat_sim + dat_obs
plt.figure(figsize=(10,8))
# define the colormap
cmap = plt.cm.jet
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
# force the first color entry to be grey
cmaplist[0] = (.5,.5,.5,1.0)
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
# define the bins and normalize
bounds = np.linspace(0,4,5)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
img1 = plt.imshow(dat_sim,cmap=cmap,norm=norm) ### 1
img2 = plt.imshow(dat_obs,cmap=cmap,norm=norm) ### 2
img3 = plt.imshow(dat_add,cmap=cmap,norm=norm) ### 3
cbar = plt.colorbar(img3, ticks=[0.5,1.5, 2.5, 3.5], orientation='vertical')
    cbar.ax.set_yticklabels(['No ice','Simulated Only', 'Observed Only', 'Overlapped'])  # vertical colorbar labels
cbar.ax.tick_params(labelsize=12)
plt.savefig('mask.pdf',dpi=300)
if __name__ == "__main__":
main()
| mit |
arcticshores/django-pandas | django_pandas/tests/test_manager.py | 3 | 7222 | from django.test import TestCase
import pandas as pd
import numpy as np
from .models import (
DataFrame, WideTimeSeries,
LongTimeSeries, PivotData, MyModelChoice
)
import pandas.util.testing as tm
class DataFrameTest(TestCase):
def setUp(self):
data = {
'col1': np.array([1, 2, 3, 5, 6, 5, 5]),
'col2': np.array([10.0, 2.4, 3.0, 5, 6, 5, 5]),
'col3': np.array([9.5, 2.4, 3.0, 5, 6, 7.5, 2.5]),
'col4': np.array([9, 2, 3, 5, 6, 7, 2]),
}
index = pd.Index(['a', 'b', 'c', 'd', 'e', 'f', 'h'])
self.df = pd.DataFrame(index=index, data=data)
for ix, cols in self.df.iterrows():
DataFrame.objects.create(
index=ix,
col1=cols['col1'],
col2=cols['col2'],
col3=cols['col3'],
col4=cols['col4']
)
def test_dataframe(self):
qs = DataFrame.objects.all()
df = qs.to_dataframe()
n, c = df.shape
self.assertEqual(n, qs.count())
flds = DataFrame._meta.get_all_field_names()
self.assertEqual(c, len(flds))
qs2 = DataFrame.objects.filter(index__in=['a', 'b', 'c'])
df2 = qs2.to_dataframe(['col1', 'col2', 'col3'], index='index')
n, c = df2.shape
self.assertEqual((n, c), (3, 3))
class TimeSeriesTest(TestCase):
def unpivot(self, frame):
N, K = frame.shape
data = {'value': frame.values.ravel('F'),
'variable': np.array(frame.columns).repeat(N),
'date': np.tile(np.array(frame.index), K)}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
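        # (Added note) unpivot reshapes an (N x K) wide frame into N*K long rows of
        # (date, variable, value); e.g. a 2x2 frame with columns A and B becomes four
        # rows: (t0, A, a0), (t1, A, a1), (t0, B, b0), (t1, B, b1).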
def setUp(self):
self.ts = tm.makeTimeDataFrame(100)
self.ts2 = self.unpivot(self.ts).set_index('date')
self.ts.columns = ['col1', 'col2', 'col3', 'col4']
create_list = []
for ix, cols in self.ts.iterrows():
create_list.append(WideTimeSeries(date_ix=ix, col1=cols['col1'],
col2=cols['col2'],
col3=cols['col3'],
col4=cols['col4']))
WideTimeSeries.objects.bulk_create(create_list)
create_list = [LongTimeSeries(date_ix=r[0], series_name=r[1][0],
value=r[1][1])
for r in self.ts2.iterrows()]
LongTimeSeries.objects.bulk_create(create_list)
def test_widestorage(self):
qs = WideTimeSeries.objects.all()
df = qs.to_timeseries(index='date_ix', storage='wide')
self.assertEqual(df.shape, (qs.count(), 5))
self.assertIsInstance(df.index, pd.tseries.index.DatetimeIndex)
self.assertIsNone(df.index.freq)
def test_longstorage(self):
qs = LongTimeSeries.objects.all()
df = qs.to_timeseries(index='date_ix', pivot_columns='series_name',
values='value',
storage='long')
self.assertEqual(set(qs.values_list('series_name', flat=True)),
set(df.columns.tolist()))
self.assertEqual(qs.filter(series_name='A').count(), len(df['A']))
self.assertIsInstance(df.index, pd.tseries.index.DatetimeIndex)
self.assertIsNone(df.index.freq)
def test_resampling(self):
qs = LongTimeSeries.objects.all()
rs_kwargs = {'how': 'sum', 'kind': 'period'}
df = qs.to_timeseries(index='date_ix', pivot_columns='series_name',
values='value', storage='long',
freq='M', rs_kwargs=rs_kwargs)
self.assertEqual([d.month for d in qs.dates('date_ix', 'month')],
df.index.month.tolist())
self.assertIsInstance(df.index, pd.tseries.period.PeriodIndex)
        # try on a wide time series
qs2 = WideTimeSeries.objects.all()
df1 = qs2.to_timeseries(index='date_ix', storage='wide',
freq='M', rs_kwargs=rs_kwargs)
self.assertEqual([d.month for d in qs.dates('date_ix', 'month')],
df1.index.month.tolist())
self.assertIsInstance(df1.index, pd.tseries.period.PeriodIndex)
def test_bad_args_wide_ts(self):
qs = WideTimeSeries.objects.all()
rs_kwargs = {'how': 'sum', 'kind': 'period'}
kwargs = {
'fieldnames': ['col1', 'col2'],
'freq': 'M', 'rs_kwargs': rs_kwargs
}
self.assertRaises(AssertionError, qs.to_timeseries, **kwargs)
kwargs2 = {
'index': 'date_ix',
'fieldnames': ['col1', 'col2'],
'storage': 'big',
'freq': 'M', 'rs_kwargs': rs_kwargs
}
self.assertRaises(AssertionError, qs.to_timeseries, **kwargs2)
def test_bad_args_long_ts(self):
qs = LongTimeSeries.objects.all()
kwargs = {
'index': 'date_ix',
'pivot_columns': 'series_name',
'values' : 'value',
'storage' : 'long'}
kwargs.pop('values')
self.assertRaises(AssertionError, qs.to_timeseries, **kwargs)
kwargs['values'] = 'value'
kwargs.pop('pivot_columns')
self.assertRaises(AssertionError, qs.to_timeseries, **kwargs)
##df = qs.to_timeseries(index='date_ix', pivot_columns='series_name',
##values='value',
##storage='long')
class PivotTableTest(TestCase):
def setUp(self):
self.data = pd.DataFrame({'row_col_a': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'row_col_b': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'row_col_c': ['dull', 'dull',
'shiny', 'dull',
'dull', 'shiny',
'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'value_col_d': np.random.randn(11),
'value_col_e': np.random.randn(11),
'value_col_f': np.random.randn(11)})
create_list = [PivotData(row_col_a=r[1][0], row_col_b=r[1][1],
row_col_c=r[1][2], value_col_d=r[1][3],
value_col_e=r[1][4], value_col_f=r[1][5])
for r in self.data.iterrows()]
PivotData.objects.bulk_create(create_list)
def test_pivot(self):
qs = PivotData.objects.all()
rows = ['row_col_a', 'row_col_b']
cols = ['row_col_c']
pt = qs.to_pivot_table(values='value_col_d', rows=rows, cols=cols)
self.assertEqual(pt.index.names, rows)
self.assertEqual(pt.columns.names, cols)
| bsd-3-clause |
wschenck/nest-simulator | pynest/examples/clopath_synapse_spike_pairing.py | 12 | 5804 | # -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
----------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron that is connected with
a Clopath connection [1]_. The synapse receives pairs of a pre- and a postsynaptic
spikes that are separated by either 10 ms (pre before post) or -10 ms (post
before pre). The change of the synaptic weight is measured after five of such
pairs. This experiment is repeated five times with different rates of the
sequence of the spike pairs: 10Hz, 20Hz, 30Hz, 40Hz, and 50Hz.
References
~~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as plt
import nest
##############################################################################
# First we specify the neuron parameters. To enable voltage dependent
# prefactor ``A_LTD(u_bar_bar)`` add ``A_LTD_const: False`` to the dictionary.
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0,
}
##############################################################################
# Hardcoded spike times of presynaptic spike generator
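# (Added clarification) Each pairing rate corresponds to an inter-pair interval
# of 1000/rate ms (10 Hz: 100 ms, 20 Hz: 50 ms, 30 Hz: ~33.3 ms, 40 Hz: 25 ms,
# 50 Hz: 20 ms), which is why the spike times below advance in those steps while
# each pre/post pair keeps a fixed 10 ms offset.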
spike_times_pre = [
# Presynaptic spike before the postsynaptic
[20., 120., 220., 320., 420.],
[20., 70., 120., 170., 220.],
[20., 53.3, 86.7, 120., 153.3],
[20., 45., 70., 95., 120.],
[20., 40., 60., 80., 100.],
# Presynaptic spike after the postsynaptic
[120., 220., 320., 420., 520., 620.],
[70., 120., 170., 220., 270., 320.],
[53.3, 86.6, 120., 153.3, 186.6, 220.],
[45., 70., 95., 120., 145., 170.],
[40., 60., 80., 100., 120., 140.]]
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
spike_times_post = [
[10., 110., 210., 310., 410.],
[10., 60., 110., 160., 210.],
[10., 43.3, 76.7, 110., 143.3],
[10., 35., 60., 85., 110.],
[10., 30., 50., 70., 90.],
[130., 230., 330., 430., 530., 630.],
[80., 130., 180., 230., 280., 330.],
[63.3, 96.6, 130., 163.3, 196.6, 230.],
[55., 80., 105., 130., 155., 180.],
[50., 70., 90., 110., 130., 150.]]
init_w = 0.5
syn_weights = []
resolution = 0.1
##############################################################################
# Loop over pairs of spike trains
for (s_t_pre, s_t_post) in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": resolution})
# Create one neuron
nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
# We need a parrot neuron since spike generators can only
# be connected with static connections
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generators
spike_gen_pre = nest.Create("spike_generator", 1, {
"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
spike_gen_post = nest.Create("spike_generator", 1, {
"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={
"delay": resolution, "weight": 80.0})
# Create weight recorder
wr = nest.Create('weight_recorder', 1)
# Create Clopath connection with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr})
syn_dict = {"synapse_model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Extract and save synaptic weights
weights = wr.get("events", "weights")
syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# scaling of the weights so that they are comparable to [1]
syn_weights = 100.0*15.0*(syn_weights - init_w)/init_w + 100.0
# Plot results
fig1, axA = plt.subplots(1, sharex=False)
axA.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
label="pre-post pairing")
axA.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
label="post-pre pairing")
axA.set_ylabel("normalized weight change")
axA.set_xlabel("rho (Hz)")
axA.legend()
axA.set_title("synaptic weight")
plt.show()
| gpl-2.0 |
wanggang3333/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
rayNymous/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py | 70 | 4985 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
# Added following line to improve realtime pan/zoom on windows:
QtGui.qApp.processEvents()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
| agpl-3.0 |
lenovor/dynamic-nmf | unsupervised/nmf.py | 1 | 2502 | import logging as log
import numpy as np
from sklearn import decomposition
from sklearn.externals import joblib
# --------------------------------------------------------------
class SklNMF:
"""
Wrapper class backed by the scikit-learn package NMF implementation.
"""
def __init__( self, max_iters = 100, init_strategy = "random" ):
		self.max_iters = max_iters
self.init_strategy = init_strategy
self.W = None
self.H = None
def apply( self, X, k = 2 ):
"""
Apply NMF to the specified document-term matrix X.
"""
self.W = None
self.H = None
model = decomposition.NMF(init=self.init_strategy, n_components=k, max_iter=self.max_iters)
self.W = model.fit_transform(X)
self.H = model.components_
def rank_terms( self, topic_index, top = -1 ):
"""
Return the top ranked terms for the specified topic, generated during the last NMF run.
"""
if self.H is None:
raise ValueError("No results for previous run available")
# NB: reverse
top_indices = np.argsort( self.H[topic_index,:] )[::-1]
# truncate if necessary
if top < 1 or top > len(top_indices):
return top_indices
return top_indices[0:top]
def generate_partition( self ):
if self.W is None:
raise ValueError("No results for previous run available")
return np.argmax( self.W, axis = 1 ).flatten().tolist()
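# Example usage (illustrative sketch only, not part of the original module;
# 'X' below stands for any document-term matrix, e.g. a TF-IDF matrix):
#
#   model = SklNMF(max_iters=200)
#   model.apply(X, k=10)                     # factorize into 10 topics
#   top_term_indices = model.rank_terms(0, top=10)
#   partition = model.generate_partition()   # dominant topic per document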
# --------------------------------------------------------------
def generate_doc_rankings( W ):
'''
Rank document indices, based on values in a W factor matrix produced by NMF.
'''
doc_rankings = []
k = W.shape[1]
for topic_index in range(k):
w = np.array( W[:,topic_index] )
top_indices = np.argsort(w)[::-1]
doc_rankings.append(top_indices)
return doc_rankings
def save_nmf_results( out_path, doc_ids, terms, term_rankings, partition, W, H, topic_labels=None ):
"""
Save output of NMF using Joblib. Note that we use the scikit-learn bundled version of joblib.
"""
# no labels? generate some standard ones
if topic_labels is None:
topic_labels = []
for i in range( len(term_rankings) ):
topic_labels.append( "C%02d" % (i+1) )
log.info( "Saving NMF results to %s" % out_path )
joblib.dump((doc_ids, terms, term_rankings, partition, W, H, topic_labels), out_path )
def load_nmf_results( in_path ):
"""
Load NMF results using Joblib. Note that we use the scikit-learn bundled version of joblib.
"""
(doc_ids, terms, term_rankings, partition, W, H, labels) = joblib.load( in_path )
return (doc_ids, terms, term_rankings, partition, W, H, labels)
| apache-2.0 |
ehrenb/Mercator | Mercator/utils/nx_scripts/path.py | 1 | 1957 | import json
from pprint import pprint
import networkx as nx
from networkx.readwrite import json_graph
import matplotlib.pyplot as plt
graph = None
with open('02e231f85558f37da6802142440736f6/02e231f85558f37da6802142440736f6_component_graph.json') as f:
graph = json.load(f)
nx_graph = json_graph.node_link_graph(graph)
pos = nx.circular_layout(nx_graph, scale=0.2)  # scale spreads the nodes apart for readability
nodes = nx_graph.nodes(data=True)
#custom labels:
labels = {}
for (p, d) in nodes:
labels[p] = d['attr_dict']['name']
edge_labels=dict([((u,v,),d['attr_dict']['method'])
for u,v,d in nx_graph.edges(data=True)])
#Determine index of src/dst based on name attribute
source = None
source_class_name = 'Lkrep/itmtd/ywtjexf/UampleUverlayUhowUctivity;'
for (p, d) in nodes:
if d['attr_dict']['name'] == source_class_name:
source = p
break
print("Source: ")
print(source)
dest = None
dest_class_name = 'Lkrep/itmtd/ywtjexf/MasterInterceptor;'
for (p, d) in nodes:
if d['attr_dict']['name'] == dest_class_name:
dest = p
break
print("Dest: ")
print(dest)
# Has path?
print(nx.has_path(nx_graph, source, dest))
# Shortest path
shortest_path = nx.shortest_path(nx_graph, source=source, target=dest)
path_str = ' -> '.join([nodes[i]['attr_dict']['name'] for i in shortest_path])
print(path_str)
shortest_path_edges = list(zip(shortest_path,shortest_path[1:]))
# Path length
print(nx.shortest_path_length(nx_graph, source=source, target=dest))
# Drawing
nx.draw(nx_graph, pos, node_size=60, node_color='k')
nx.draw_networkx_nodes(nx_graph,pos, nodelist=shortest_path, node_color='r')
nx.draw_networkx_edges(nx_graph,pos, edgelist=shortest_path_edges, edge_color='r')
nx.draw_networkx_labels(nx_graph, pos, labels, font_size=8, alpha=0.5)
nx.draw_networkx_edge_labels(nx_graph, pos, edge_labels, font_size=8, alpha=0.5)
plt.axis('equal')
plt.savefig("graph_shortest_path.png")
| mit |
edonyM/EMOCR | refer/Ch02/EXTRAS/createDist2.py | 4 | 2162 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
n = 1000 #number of points to create
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
xcord3 = []; ycord3 = []
markers =[]
colors =[]
fw = open('testSet.txt','w')
for i in range(n):
[r0,r1] = random.standard_normal(2)
myClass = random.uniform(0,1)
if (myClass <= 0.16):
fFlyer = random.uniform(22000, 60000)
tats = 3 + 1.6*r1
markers.append(20)
colors.append(2.1)
classLabel = 1 #'didntLike'
xcord1.append(fFlyer); ycord1.append(tats)
elif ((myClass > 0.16) and (myClass <= 0.33)):
fFlyer = 6000*r0 + 70000
tats = 10 + 3*r1 + 2*r0
markers.append(20)
colors.append(1.1)
classLabel = 1 #'didntLike'
if (tats < 0): tats =0
if (fFlyer < 0): fFlyer =0
xcord1.append(fFlyer); ycord1.append(tats)
elif ((myClass > 0.33) and (myClass <= 0.66)):
fFlyer = 5000*r0 + 10000
tats = 3 + 2.8*r1
markers.append(30)
colors.append(1.1)
classLabel = 2 #'smallDoses'
if (tats < 0): tats =0
if (fFlyer < 0): fFlyer =0
xcord2.append(fFlyer); ycord2.append(tats)
else:
fFlyer = 10000*r0 + 35000
tats = 10 + 2.0*r1
markers.append(50)
colors.append(0.1)
classLabel = 3 #'largeDoses'
if (tats < 0): tats =0
if (fFlyer < 0): fFlyer =0
xcord3.append(fFlyer); ycord3.append(tats)
fw.close()
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=20, c='red')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
type3 = ax.scatter(xcord3, ycord3, s=50, c='blue')
ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
ax.axis([-5000,100000,-2,25])
plt.xlabel('Frequent Flyer Miles Earned Per Year')
plt.ylabel('Percentage of Time Spent Playing Video Games')
plt.show()
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
bobbymckinney/seebeck_measurement | old versions/Seebeck_Processing_v4.py | 2 | 22129 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 18 16:07:35 2014
@author: Benjamin Kostreva ([email protected])
__Title__
Description:
Interpolates all of the data from the Seebeck files and calculates dT and
corrected voltages.
Comments:
- The linear interpolation calculates what the data in between time stamps
should be.
Edited by Bobby McKinney 2015-01-12
"""
import numpy as np # for linear fits
# for saving plots
import matplotlib.pyplot as plt
# for creating new folders
import os
###############################################################################
class Process_Data:
''' Interpolates the data in order to get a common timestamp and outputs
time, dT, lowV, highV corrected lists with common timestamps on each
line.
'''
#--------------------------------------------------------------------------
def __init__(self, directory, fileName, tc_type):
self.directory = directory
filePath = directory + '/'+ fileName
ttempA, tempA, ttempB, tempB, thighV, highV, tlowV, lowV, tlowV2, lowV2, thighV2, highV2, ttempB2, tempB2, ttempA2, tempA2, self.indicator = extract_Data(filePath)
print('extracted data from raw file')
self.ttempA = ttempA # first time stamp in a line
self.ttempA2 = ttempA2 # last time stamp in a line
# This will be the common time stamp for each line after interpolation:
self.t = [None]*(len(ttempA)-1)
for x in xrange(1, len(ttempA)):
self.t[x-1] = (self.ttempA[x] + self.ttempA2[x-1])/2
print('find dT')
# Finding dT (after interpolation, at common time):
tempA_int = self.interpolate(ttempA, tempA)
tempA2_int = self.interpolate(ttempA2, tempA2)
for x in xrange(len(tempA_int)):
tempA_int[x] = (tempA_int[x] + tempA2_int[x])/2
tempB_int = self.interpolate(ttempB, tempB)
tempB2_int = self.interpolate(ttempB2, tempB2)
for x in xrange(len(tempA_int)):
tempB_int[x] = (tempB_int[x] + tempB2_int[x])/2
self.dT = [None]*len(tempA_int)
for x in xrange(len(tempA_int)):
self.dT[x] = tempA_int[x] - tempB_int[x]
# Finding avg T:
self.avgT = [None]*len(tempA_int)
for x in xrange(len(tempA_int)):
self.avgT[x] = (tempA_int[x] + tempB_int[x])/2
print('find corrected voltage')
# Voltage Corrections (after interpolation, at common time):
highV_int = self.interpolate(thighV, highV)
highV2_int = self.interpolate(thighV2, highV2)
for x in xrange(len(highV_int)):
highV_int[x] = (highV_int[x] + highV2_int[x])/2
lowV_int = self.interpolate(tlowV, lowV)
lowV2_int = self.interpolate(tlowV2, lowV2)
for x in xrange(len(lowV_int)):
lowV_int[x] = (lowV_int[x] + lowV2_int[x])/2
self.highV_int_corrected = self.voltage_Correction(highV_int, 'high', tc_type)
self.lowV_int_corrected = self.voltage_Correction(lowV_int, 'low', tc_type)
print('calculate seebeck')
# Complete linear fits to the data to find Seebeck coefficients:
low_seebeck, high_seebeck = self.calculate_seebeck(self.extract_measurements())
print('extracting data from fits')
# Extract out the data from the fits in order to write to file later:
temp, self.high_m, self.high_b, self.high_r = self.extract_seebeck_elements(high_seebeck)
self.temp, self.low_m, self.low_b, self.low_r = self.extract_seebeck_elements(low_seebeck)
#end init
#--------------------------------------------------------------------------
def interpolate(self, tdata, data):
''' Interpolates the data in order to achieve a single time-stamp
on each data line.
'''
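        # (Added note) Standard linear interpolation between neighbouring samples
        # is used below:
        #     y(t) = y0*(1 - (t - t0)/(t1 - t0)) + y1*(t - t0)/(t1 - t0),
        # evaluated at the common per-line time stamp self.t[x-1].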
y0 = data[0]
t0 = tdata[0]
y = [None]*(len(data)-1)
for x in xrange(1, len(data)):
y1 = data[x]
t1 = tdata[x]
t_term = (self.t[x-1] - t0)/(t1 - t0)
# Linear interpolation:
y[x-1] = y0*(1 - t_term) + y1*t_term
y0 = data[x]
            t0 = tdata[x]
#end for
return y
#end def
#--------------------------------------------------------------------------
def voltage_Correction(self, raw_data, side, tc_type):
''' raw_data must be in uV, corrects the voltage measurements from the
thermocouples
'''
# Kelvin conversion for polynomial correction.
avgT_Kelvin = [None]*len(self.avgT)
for x in xrange(len(self.avgT)):
avgT_Kelvin[x] = self.avgT[x] + 273.15
# Correction for effect from Thermocouple Seebeck
v_corrected = [None]*len(avgT_Kelvin)
for x in xrange(len(avgT_Kelvin)):
v_corrected[x] = self.alphacalc(avgT_Kelvin[x], side, tc_type)*self.dT[x] - raw_data[x]
return v_corrected
#end def
#--------------------------------------------------------------------------
def alphacalc(self, x, side, tc_type):
''' x = avgT
alpha in uV/K
'''
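        # (Added note) alpha is the Seebeck coefficient of the selected thermocouple
        # leg, evaluated from high-order polynomial fits in absolute temperature (K);
        # each branch below covers one validity range.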
if tc_type == "k-type":
### If Chromel, taken from Chromel_Seebeck.txt
if side == 'high':
if ( x >= 270 and x < 700):
alpha = -2467.61114613*x**0 + 55.6028987953*x**1 + \
-0.552110359087*x**2 + 0.00320554346691*x**3 + \
-1.20477254034e-05*x**4 + 3.06344710205e-08*x**5 + \
-5.33914758601e-11*x**6 + 6.30044607727e-14*x**7 + \
-4.8197269477e-17*x**8 + 2.15928374212e-20*x**9 + \
-4.30421084091e-24*x**10
#end if
elif ( x >= 700 and x < 1599):
alpha = 1165.13254764*x**0 + -9.49622421414*x**1 + \
0.0346344390853*x**2 + -7.27785048931e-05*x**3 + \
9.73981855547e-08*x**4 + -8.64369652227e-11*x**5 + \
5.10080771762e-14*x**6 + -1.93318725171e-17*x**7 + \
4.27299905603e-21*x**8 + -4.19761748937e-25*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Chromel)
### If Alumel, taken from Alumel_Seebeck.txt
elif side == 'low':
if ( x >= 270 and x < 570):
alpha = -3465.28789643*x**0 + 97.4007289124*x**1 + \
-1.17546754681*x**2 + 0.00801252041119*x**3 + \
-3.41263237031e-05*x**4 + 9.4391002358e-08*x**5 + \
-1.69831949233e-10*x**6 + 1.91977765586e-13*x**7 + \
-1.2391854625e-16*x**8 + 3.48576207577e-20*x**9
#end if
elif ( x >= 570 and x < 1599):
alpha = 254.644633774*x**0 + -2.17639940109*x**1 + \
0.00747127856327*x**2 + -1.41920634198e-05*x**3 + \
1.61971537881e-08*x**4 + -1.14428153299e-11*x**5 + \
4.969263632e-15*x**6 + -1.27526741699e-18*x**7 + \
1.80403838088e-22*x**8 + -1.23699936952e-26*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Alumel)
else:
print "Error in voltage correction."
#end if (K-type)
return alpha
#end def
#--------------------------------------------------------------------------
def extract_measurements(self):
'''
Returns a list of lists with elements [line number, 'Start'/'Stop' , Temperature]
for each subsequent measurement.
'''
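        # (Added illustration with hypothetical values) e.g.
        #     [[12, 'Start', 49.8], [57, 'Stop', 50.2], [90, 'Start', 99.7], ...]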
# Extract a list of just 'Start' and 'Stop' (and 'Left' (Equilibrium) if applicable):
h = [None]*len(self.avgT)
for x in xrange(len(self.avgT)):
h[x] = self.indicator[x][:-11]
h = ','.join(h)
h = ''.join(h.split())
h = h.split(',')
h = [x for x in h if x]
# Get number of Measurements:
num = 0
for x in xrange(len(h)-1):
if h[x] == 'Start':
# if the indicator says stop (or stop is overlapped by another start):
if h[x+1] == 'Stop' or h[x+1] == 'Start':
num = num + 1
if h[-1] == 'Start':
num = num + 1
num = num*2 # Both start and stop
# Create a list that records the beginning and end of each measurement:
measurement_indicator = [[None,None,None]]*num # [line num, 'Start/Stop', temp]
s = -1 # iterator for each element of h
n = 0 # iterator to create the elements of measurement_indicator
overlap_indicator = 0 # in case a 'Start' overlaps a 'Stop'
for x in xrange(len(self.avgT)):
if self.indicator[x] == 'Start Oscillation':
s = s + 1 # next element of h
if h[s] == 'Start':
if overlap_indicator == 1:
# if 'Start' overlaps a 'Stop':
measurement_indicator[n] = [x,'Stop',self.avgT[x]]
n = n + 1
overlap_indicator = 0
#end if
try:
if h[s+1] == 'Stop' or h[s+1] == 'Start':
measurement_indicator[n] = [x,'Start',self.avgT[x]]
n = n + 1
if h[s+1] == 'Start':
overlap_indicator = 1
#end try
except IndexError:
# if h[s+1] is out of range, i.e. this is the last 'Start':
measurement_indicator[n] = [x,'Start',self.avgT[x]]
#end except
#end if
#end if
elif self.indicator[x] == 'Stop Oscillation':
measurement_indicator[n] = [x,'Stop',self.avgT[x]]
n = n + 1
# goes to the next element of h:
s = s + 1
#end elif
elif self.indicator[x] == 'Left Equilibrium':
# goes to the next element of h:
s = s + 1
#end elif
#end for
# If we hit the end of the data and there wasn't a 'Stop' indicator:
if measurement_indicator[-1] == [None,None,None]:
last_elem = len(self.avgT)-1
measurement_indicator[-1] = [last_elem,'Stop',self.avgT[last_elem]]
#end if
return measurement_indicator
#end def
#--------------------------------------------------------------------------
def calculate_seebeck(self, measurement_indicator):
'''
Calculates Seebeck for each measurement by finding the slope of a
linear fit to the corrected voltage and dT.
measurement_indicator - list of lists of form [line number, 'Start'/'Stop' , Temperature]
'''
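        # (Added note) The Seebeck coefficient is taken as the slope of the linear fit
        #     dV = S*dT + offset, i.e. S = d(dV)/d(dT) in uV/K,
        # fitted separately for the low (Alumel) and high (Chromel) voltages.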
self.dT
self.highV_int_corrected
self.lowV_int_corrected
# number of measurements:
num = len(measurement_indicator)/2
measurement_range = [[None,None,None]]*num # start, stop (indexes), temp
n = 0 # index for this list
for i in xrange(len(measurement_indicator)-1):
#if 'Start':
if measurement_indicator[i][1] == 'Start':
m1 = measurement_indicator[i] # Start
m2 = measurement_indicator[i+1] # Stop
low = m1[0]
high = m2[0]
temp = np.average(self.avgT[low:high+1])
measurement_range[n] = [low, high, temp]
n = n + 1
self.plot_number = 0 # for creating multiple plots without overwriting
lowV_fit = [None]*len(measurement_range)
highV_fit = [None]*len(measurement_range)
for i in xrange(len(measurement_range)):
low = measurement_range[i][0]
high = measurement_range[i][1]
temp = measurement_range[i][2]
x = self.dT[low:high+1]
y_lowV = self.lowV_int_corrected[low-1:high]
y_highV = self.highV_int_corrected[low-1:high]
lowV_fit[i] = self.polyfit(x,y_lowV,1,temp)
highV_fit[i] = self.polyfit(x,y_highV,1,temp)
#celsius = u"\u2103"
celsius = 'C'
self.create_plot(x, y_lowV, y_highV, lowV_fit[i], highV_fit[i], title='%.2f %s' % (temp, celsius) )
return lowV_fit, highV_fit
#end def
#--------------------------------------------------------------------------
def polyfit(self, x, y, degree, temp):
'''
Returns the polynomial fit for x and y of degree degree along with the
r^2 and the temperature, all in dictionary form.
'''
results = {}
coeffs = np.polyfit(x, y, degree)
# Polynomial Coefficients
results['polynomial'] = coeffs.tolist()
# Calculate coefficient of determination (r-squared):
p = np.poly1d(coeffs)
# fitted values:
yhat = p(x) # or [p(z) for z in x]
# mean of values:
ybar = np.sum(y)/len(y) # or sum(y)/len(y)
# regression sum of squares:
ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
# total sum of squares:
sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
results['r-squared'] = ssreg / sstot
results['temperature'] = temp
return results
#end def
#--------------------------------------------------------------------------
def create_plot(self, x, ylow, yhigh, fitLow, fitHigh, title):
self.plot_number += 1
dpi = 400
plt.ioff()
# Create Plot:
fig = plt.figure(self.plot_number, dpi=dpi)
ax = fig.add_subplot(111)
ax.grid()
ax.set_title(title)
ax.set_xlabel("dT (K)")
ax.set_ylabel("dV (uV)")
# Plot data points:
ax.scatter(x, ylow, color='r', marker='.', label="Low Voltage")
ax.scatter(x, yhigh, color='b', marker='.', label="High Voltage")
# Overlay linear fits:
coeffsLow = fitLow['polynomial']
coeffsHigh = fitHigh['polynomial']
p_low = np.poly1d(coeffsLow)
p_high = np.poly1d(coeffsHigh)
xp = np.linspace(min(x), max(x), 5000)
low_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsLow[0], coeffsLow[1])
high_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsHigh[0], coeffsHigh[1])
ax.plot(xp, p_low(xp), '-', c='#FF9900', label="Low Voltage Fit\n %s" % low_eq)
ax.plot(xp, p_high(xp), '-', c='g', label="High Voltage Fit\n %s" % high_eq)
ax.legend(loc='upper left', fontsize='10')
# Save:
plot_folder = self.directory + '/Seebeck Plots/'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
fig.savefig('%s.png' % (plot_folder + title) , dpi=dpi)
plt.close()
#end def
#--------------------------------------------------------------------------
def extract_seebeck_elements(self, definitions):
'''
Extracts the data from the Seebeck fits in order to write to file later.
definitions - ouput of self.calculate_seebeck()
'''
length = len(definitions)
temp = [None]*length # Temperature
m = [None]*length # Slope (Seebeck)
b = [None]*length # offset
r = [None]*length # r-squared
for x in xrange(length):
temp[x] = definitions[x]['temperature']
m[x] = definitions[x]['polynomial'][0]
b[x] = definitions[x]['polynomial'][1]
r[x] = definitions[x]['r-squared']
return temp, m, b, r
#end def
#--------------------------------------------------------------------------
def return_output(self):
return self.t, self.avgT, self.dT, self.lowV_int_corrected, self.highV_int_corrected, self.indicator
#end def
#--------------------------------------------------------------------------
def return_seebeck(self):
# temps are the same for both
return self.temp, self.low_m, self.low_b, self.low_r, self.high_m, self.high_b, self.high_r
#end def
#end class
###############################################################################
#--------------------------------------------------------------------------
def extract_Data(filePath):
f = open(filePath)
loadData = f.read()
f.close()
loadDataByLine = loadData.split('\n')
numericData = loadDataByLine[5:]
# Create lists that are one less than the total number of lines...
# this stops any errors from an incomplete line at the end. :
length = len(numericData)-2
ttempA = [None]*length
tempA = [None]*length
ttempB = [None]*length
tempB = [None]*length
thighV = [None]*length
highV = [None]*length
tlowV = [None]*length
lowV = [None]*length
tlowV2 = [None]*length
lowV2 = [None]*length
thighV2 = [None]*length
highV2 = [None]*length
ttempB2 = [None]*length
tempB2 = [None]*length
ttempA2 = [None]*length
tempA2 = [None]*length
indicator = [None]*length
print('Successfully loaded data by line')
for x in xrange(length):
line = numericData[x].split(',')
ttempA[x] = float(line[0])
tempA[x] = float(line[1])
ttempB[x] = float(line[2])
tempB[x] = float(line[3])
thighV[x] = float(line[4])
highV[x] = float(line[5])
tlowV[x] = float(line[6])
lowV[x] = float(line[7])
tlowV2[x] = float(line[8])
lowV2[x] = float(line[9])
thighV2[x] = float(line[10])
highV2[x] = float(line[11])
ttempB2[x] = float(line[12])
tempB2[x] = float(line[13])
ttempA2[x] = float(line[14])
tempA2[x] = float(line[15])
indicator[x] = line[16]
#end for
print('Successfully split each line of data')
return ttempA, tempA, ttempB, tempB, thighV, highV, tlowV, lowV, tlowV2, lowV2, thighV2, highV2, ttempB2, tempB2, ttempA2, tempA2, indicator
#end def
#--------------------------------------------------------------------------
def create_processed_files(directory, fileName, tc_type):
'''
Writes the output from the Process_Data object into seperate files.
'''
# Make a new folder:
print 'start processing'
print 'directory: '+ directory
print 'fileName: ' + fileName
print 'tc_type: '+ tc_type
Post_Process = Process_Data(directory, fileName, tc_type)
print('data processed')
### Write processed data to a new file:
outFile = directory + '/Processed_Data.csv'
file = outFile # creates a data file
myfile = open(outFile, 'w') # opens file for writing/overwriting
myfile.write('Time (s),Average T (C),dT (K),Low V Corrected (uV),High V Corrected (uV)\n')
time, avgT, dT, lowV, highV, indicator = Post_Process.return_output()
for x in xrange(len(time)):
myfile.write('%.2f,%f,%f,%f,%f,%s\n' % (time[x], avgT[x], dT[x], lowV[x], highV[x], indicator[x]))
myfile.close()
### Write linear fits and calculated Seebeck coefficients to a new file:
seebeck_file = directory + '/Seebeck.csv'
file = seebeck_file
myfile = open(seebeck_file, 'w')
myfile.write('Linear Fit: seebeck*x + offset\n')
myfile.write('\n')
myfile.write('Low (i.e. Alumel):,,,,,High (i.e. Chromel):\n')
table_header = 'Temp (C),Seebeck (uV/K),offset,r^2'
myfile.write('%s,,%s\n' % (table_header,table_header))
temp, low_m, low_b, low_r, high_m, high_b, high_r = Post_Process.return_seebeck()
for x in xrange(len(temp)):
myfile.write('%f,%f,%f,%f,,%f,%f,%f,%f\n' % (temp[x], low_m[x], low_b[x], low_r[x], temp[x], high_m[x], high_b[x], high_r[x]))
myfile.close()
#end def
#==============================================================================
def main():
inFile = 'Data.csv'
directory = '../Google\ Drive/rwm-tobererlab/Seebeck Data 2015-05-25 20.03.24/'
create_processed_files(directory, inFile, "k-type")
#end def
if __name__ == '__main__':
main()
#end if | gpl-3.0 |
krez13/scikit-learn | examples/model_selection/plot_roc.py | 49 | 5041 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
wohllab/milkyway_proteomics | galaxy_milkyway_files/tools/wohl-proteomics/wohl_skyline/job_history_runner.py | 1 | 7434 | #This script is responsible for crawling over Galaxy histories by taking in a
import optparse
import json
import bioblend,sys
import multiprocessing
import pandas
import collections
from bioblend.galaxy import GalaxyInstance
parser = optparse.OptionParser()
#parser.add_option("--job_id",action="store",type="string",dest="job_id")
parser.add_option("--history_id",action="store",type="string",dest="history_id")
parser.add_option("--history_name",action="store",type="string",dest="history_name")
parser.add_option("--job_id",action="store",type="string",dest="job_id")
parser.add_option("--tool_id",action="store",type="string",dest="tool_id")
parser.add_option("--output_file",action="store",type="string",dest="output_file")
(options,args) = parser.parse_args()
galaxy_address='127.0.0.1'
galaxy_API_key='37430b18a3e4610ea243c316b293d06f'
#options.history_name=options.history_name.replace("_-_-_"," ")
gi = GalaxyInstance(galaxy_address, key=galaxy_API_key)
def convert(data):
if isinstance(data, basestring):
return str(data.encode('utf-8'))
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
def checklists(d):
for k in list(d.keys()):
try:
v=d[k]
if isinstance(v, dict) and len(v.keys())>0:
checklists(v)
elif len(v)==0 and not isinstance(v,str):
#if isinstance(v, list):
#print "trimming ",k,"...."
del d[k]
elif isinstance(v,dict) and len(v.keys())==0:
#print "trimming ",k,"...."
del d[k]
elif isinstance(v,list) and len(v)>0:
temp={}
i=1
for each in v:
temp["item_"+str(i)]=each
i+=1
d[k]=temp
except:
#if k=="inputs":
# print k,d
#print "Didn't find ",k,"in",d
pass
#return d
def findMostRecentRun(incoming_tool_id,history_id,all_jobs,interface):
#histories=gi.histories.get_histories()
this_history=gi.histories.show_history(history_id)
    #Make a list of the dataset ids in this history that are currently in the 'running' state.
history_datasets=this_history['state_ids']['running']
for this_job in all_jobs:
if this_job['tool_id']==incoming_tool_id:
job_details=interface.jobs.show_job(this_job['id'],full_details=True)
for each_dataset in job_details['outputs']:
if job_details['outputs'][each_dataset]['id'] in history_datasets:
return this_job['id']
def galaxyDatasetToJobMapper(dataset_id,output,jobs_client,data_client):
if not dataset_id in output:
current_job=jobs_client.show_job(data_client.show_dataset(dataset_id=dataset_id)['creating_job'],full_details=True)
output[dataset_id]=current_job
else:
current_job=output[dataset_id]
#try:
# #current_job=jobs.show_job(all_jobs_dict[dataset_id],full_details=True)
# current_job=all_jobs_dict[dataset_id]
# #print "found job",current_job['id'],"for the dataset",dataset_id
#except:
# #print "We failed to find the dataset_id ",dataset_id,"during the recursive activity..."
# #print "it should have belonged to the job",all_jobs_dict[dataset_id]
# #return {}
if (len(current_job['inputs'])==0):#base case
return output
for each_input_name in current_job['inputs']:
each_input=current_job['inputs'][each_input_name]
output.update(galaxyDatasetToJobMapper(each_input['id'],output,jobs_client,data_client))
return output
def galaxyJobTreeEnumerator(incoming_job_id,jobs_client,data_client):#,all_jobs):
try:
current_job=jobs_client.show_job(incoming_job_id,full_details=True)
except:
print "We failed to find the job_id ",incoming_job_id,"during the preparatory steps..."
return {}
if (len(current_job['inputs'])==0):
return {}
else:
#if all_jobs is None:
# all_jobs=jobs.get_jobs()
dataset_to_job={}
#for each_job in all_jobs:
this_job=jobs_client.show_job(options.job_id,full_details=True)
for each_output in this_job['outputs']:
this_output=this_job['outputs'][each_output]
#dataset_to_job[this_output['id']]=each_job['id']
dataset_to_job[this_output['id']]=this_job
#output={}
output=dataset_to_job
for each_input_name in current_job['inputs']:
each_input=current_job['inputs'][each_input_name]
output.update(galaxyDatasetToJobMapper(each_input['id'],dataset_to_job,jobs_client,data_client))
return output
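# Note (added for clarity, not in the original script): both helpers above
# return a plain dict mapping dataset ids to the full job description that
# created them, roughly of the form
#   {'<dataset_id>': {'id': '<job_id>', 'tool_id': '...', 'inputs': {...},
#                     'outputs': {...}, ...}, ...}
# so the loop below can regroup those job records by tool id.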
jobs_client=bioblend.galaxy.jobs.JobsClient(gi)
data_client=bioblend.galaxy.datasets.DatasetClient(gi)
#all_jobs=jobs.get_jobs()
#job_id=findMostRecentRun(options.tool_id,options.history_id,all_jobs,gi)
job_id=options.job_id
print "We found the Rdata node job id, which is...",job_id
dependent_jobs=galaxyJobTreeEnumerator(job_id,jobs_client,data_client)
restructured_by_tool={}
for each_job in dependent_jobs:
this_job=dependent_jobs[each_job]
#print this_job
#sys.exit(2)
if this_job['tool_id'] not in restructured_by_tool:
restructured_by_tool[this_job['tool_id']]={}
#restructured_by_tool[this_job['tool_id']][this_job['id']]={} #change from "each_job" to "this_job['id']
restructured_by_tool[this_job['tool_id']][this_job['update_time']]={} #change from "each_job" to "this_job['id']
#restructured_by_tool[this_job['tool_id']][this_job['id']]['']
if 'job_metrics' in this_job and len(this_job['job_metrics'])>0:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['jobs_metrics']=this_job['job_metrics']
if 'job_metrics' in this_job:
del this_job['job_metrics'] #This information causes issues...
if 'inputs' in this_job:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['inputs']=this_job['inputs']
del this_job['inputs']
if 'outputs' in this_job:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['outputs']=this_job['outputs']
del this_job['outputs']
if 'stdout' in this_job:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['stdout']=this_job['stdout']
del this_job['stdout']
if 'stderr' in this_job:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['stderr']=this_job['stderr']
del this_job['stderr']
if 'params' in this_job:
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['params']=this_job['params']
del this_job['params']
restructured_by_tool[this_job['tool_id']][this_job['update_time']]['additional info']=this_job
restructured_by_tool=convert(restructured_by_tool)
checklists(restructured_by_tool)
new_jobs_df=pandas.DataFrame.from_dict(restructured_by_tool)
new_jobs_df.to_csv(open(options.output_file,'w'),sep='\t')
with open(options.output_file,'w') as json_output_handle:
json.dump(restructured_by_tool,json_output_handle,indent=4)
#json.dump(dependent_jobs,json_output_handle,indent=4,separators=("\n",": "))
| mit |
rexshihaoren/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
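# (added note, not in the original example) order[k] is the index of the
# MiniBatchKMeans center closest to KMeans center k, so e.g. order = [2, 0, 1]
# would mean mbk center 2 plays the role of k-means cluster 0 in the plots.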
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/examples/try_polytrend.py | 33 | 1477 |
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load()
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
plt.figure()
plt.plot(q2, lw=2)
plt.show()
| bsd-3-clause |
jwiggins/scikit-image | doc/examples/features_detection/plot_windowed_histogram.py | 26 | 5127 | from __future__ import division
"""
========================
Sliding window histogram
========================
Histogram matching can be used for object detection in images [1]_. This
example extracts a single coin from the `skimage.data.coins` image and uses
histogram matching to attempt to locate it within the original image.
First, a box-shaped region of the image containing the target coin is
extracted and a histogram of its greyscale values is computed.
Next, for each pixel in the test image, a histogram of the greyscale values in
a region of the image surrounding the pixel is computed.
`skimage.filters.rank.windowed_histogram` is used for this task, as it employs
an efficient sliding window based algorithm that is able to compute these
histograms quickly [2]_. The local histogram for the region surrounding each
pixel in the image is compared to that of the single coin, with a similarity
measure being computed and displayed.
The histogram of the single coin is computed using `numpy.histogram` on a box
shaped region surrounding the coin, while the sliding window histograms are
computed using a disc shaped structural element of a slightly different size.
This is done in aid of demonstrating that the technique still finds similarity
in spite of these differences.
To demonstrate the rotational invariance of the technique, the same test is
performed on a version of the coins image rotated by 45 degrees.
References
----------
.. [1] Porikli, F. "Integral Histogram: A Fast Way to Extract Histograms
in Cartesian Spaces" CVPR, 2005. Vol. 1. IEEE, 2005
.. [2] S.Perreault and P.Hebert. Median filtering in constant time.
Trans. Image Processing, 16(9):2389-2394, 2007.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data, transform
from skimage.util import img_as_ubyte
from skimage.morphology import disk
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def windowed_histogram_similarity(image, selem, reference_hist, n_bins):
# Compute normalized windowed histogram feature vector for each pixel
px_histograms = rank.windowed_histogram(image, selem, n_bins=n_bins)
# Reshape coin histogram to (1,1,N) for broadcast when we want to use it in
# arithmetic operations with the windowed histograms from the image
reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)
# Compute Chi squared distance metric: sum((X-Y)^2 / (X+Y));
# a measure of distance between histograms
X = px_histograms
Y = reference_hist
num = (X - Y) ** 2
denom = X + Y
denom[denom == 0] = np.infty
frac = num / denom
chi_sqr = 0.5 * np.sum(frac, axis=2)
# Generate a similarity measure. It needs to be low when distance is high
# and high when distance is low; taking the reciprocal will do this.
# Chi squared will always be >= 0, add small value to prevent divide by 0.
similarity = 1 / (chi_sqr + 1.0e-4)
return similarity
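# A tiny worked example (not part of the original demo) of the Chi squared
# distance used above, on two hypothetical 4-bin normalized histograms:
#
#   X = np.array([0.5, 0.3, 0.2, 0.0])
#   Y = np.array([0.4, 0.4, 0.1, 0.1])
#   chi_sqr = 0.5 * np.sum((X - Y) ** 2 / (X + Y))   # => about 0.08
#
# Identical histograms give chi_sqr == 0, and the similarity 1 / (chi_sqr +
# 1e-4) is therefore largest where the local histogram matches the coin's.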
# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())
# Quantize to 16 levels of greyscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16
# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148] # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
coin_coords[0]:coin_coords[2]]
# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)
# Compute a disk shaped mask that will define the shape of our sliding window
# Example coin is ~44px across, so make a disk 61px wide (2 * rad + 1) to be
# big enough for other coins too.
selem = disk(30)
# Compute the similarity across the complete image
similarity = windowed_histogram_similarity(quantized_img, selem, coin_hist,
coin_hist.shape[0])
# Now try a rotated image
rotated_img = img_as_ubyte(transform.rotate(img, 45.0, resize=True))
# Quantize to 16 levels as before
quantized_rotated_image = rotated_img // 16
# Similarity on rotated image
rotated_similarity = windowed_histogram_similarity(quantized_rotated_image,
selem, coin_hist,
coin_hist.shape[0])
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
axes[0, 0].imshow(quantized_img, cmap='gray')
axes[0, 0].set_title('Quantized image')
axes[0, 0].axis('off')
axes[0, 1].imshow(coin, cmap='gray')
axes[0, 1].set_title('Coin from 2nd row, 4th column')
axes[0, 1].axis('off')
axes[1, 0].imshow(img, cmap='gray')
axes[1, 0].imshow(similarity, cmap='hot', alpha=0.5)
axes[1, 0].set_title('Original image with overlaid similarity')
axes[1, 0].axis('off')
axes[1, 1].imshow(rotated_img, cmap='gray')
axes[1, 1].imshow(rotated_similarity, cmap='hot', alpha=0.5)
axes[1, 1].set_title('Rotated image with overlaid similarity')
axes[1, 1].axis('off')
plt.show()
| bsd-3-clause |
mrcouts/Nyquist-Attack | Robotics/Simulacao.py | 1 | 1484 | from Serial import *
R = Serial("Rx", '', Matrix([['x'],['y']]).T)
x_ = Matrix([R.qh_,R.ph_])
xn = lambda X:x_.subs([(x_[i],X[i]) for i in range(len(x_)-1,-1,-1)]).evalf()
m = Matrix([1.0])
l = Matrix([0.2])
lg = Matrix([0.1])
Jx = Matrix([0.00333333333333])
g = Matrix([9.8])
PI = Matrix([m,l,lg,Jx,g])
symPI = Matrix([R.m,R.l,R.lg,R.Jx, Matrix([symbols('g')]) ])
PIrep = [(symPI[i], PI[i]) for i in range(len(symPI))]
r = Matrix([sin(t)])
Kp = 100.0*eye(len(R.qh_))
Kv = 20.0*eye(len(R.qh_))
u = ( R.vh_ + R.gh_ + R.Mh_*(r.diff(t,2) + Kv*(r.diff(t) - R.ph_ ) + Kp*(r - R.qh_ ) ) ).subs(PIrep).evalf()
f = simplify(Matrix([R.ph_, R.Mh_**-1 * (u - R.vh_ - R.gh_ )]))
f2 = simplify(f.subs(PIrep).evalf())
f3 = Matrix([f2[i] for i in range(len(f2))])
fn = lambda t,X:f3.subs([(x_[i],X[i]) for i in range(len(x_)-1,-1,-1)]).subs(symbols('t'),t).evalf()
if(True):
from RK import *
import time
t0 = 0
tf = 10
n = 1000
start_time = time.time()
Y = TR('Euler','Euler').TRX(fn, t0, Matrix([0,0]), n, tf, tol=1e-5, nmax_gnr=50,nmax_gss=100)
#Y = RK('RK5').RKX(fn, t0, Matrix([0,0,0,0]), n, tf)
elapsed_time = time.time() - start_time
print(elapsed_time)
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(t0, tf, n)
y = x.copy()
for i in np.arange(np.size(x)):
y[i] = Y[0,i]
plt.figure()
plt.plot(x, y, 'r')
plt.xlabel('x')
plt.ylabel('y')
plt.title('title')
plt.show() | gpl-3.0 |
riemarc/pyinduct | pyinduct/examples/string_with_mass/utils.py | 3 | 10067 | import pyinduct as pi
import numpy as np
import sympy as sp
import time
import os
import pyqtgraph as pg
import matplotlib.pyplot as plt
from pyinduct.visualization import PgDataPlot, get_colors
# matplotlib configuration
plt.rcParams.update({'text.usetex': True})
def pprint(expression="\n\n\n"):
if isinstance(expression, np.ndarray):
expression = sp.Matrix(expression)
sp.pprint(expression, num_columns=180)
def get_primal_eigenvector(according_paper=False):
if according_paper:
# some condensed parameters
alpha = beta = sym.c / 2
tau0 = 1 / sp.sqrt(sym.a * sym.b)
w = tau0 * sp.sqrt((sym.lam + alpha) ** 2 - beta ** 2)
# matrix exponential
expm_A = sp.Matrix([
[sp.cosh(w * sym.z),
(sym.lam + sym.c) / sym.b / w * sp.sinh(w * sym.z)],
[sym.lam / sym.a / w * sp.sinh(w * sym.z),
sp.cosh(w * sym.z)]
])
else:
# matrix
A = sp.Matrix([[sp.Float(0), (sym.lam + sym.c) / sym.b],
[sym.lam/sym.a, sp.Float(0)]])
# matrix exponential
expm_A = sp.exp(A * sym.z)
# inital values at z=0 (scaled by xi(s))
phi0 = sp.Matrix([[sp.Float(1)], [sym.lam / sym.d]])
# solution
phi = expm_A * phi0
return phi
def plot_eigenvalues(eigenvalues, return_figure=False):
plt.figure(facecolor="white")
plt.scatter(np.real(eigenvalues), np.imag(eigenvalues))
ax = plt.gca()
ax.set_xlabel(r"$Re(\lambda)$")
ax.set_ylabel(r"$Im(\lambda)$")
if return_figure:
return ax.get_figure()
else:
plt.show()
def check_eigenvalues(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl, ceq, ss):
# check eigenvalues of the approximation
A_sys = (-ceq[0].dynamic_forms[sys_fem_lbl].e_n_pb_inv @
ceq[0].dynamic_forms[sys_fem_lbl].matrices["E"][0][1])
A_obs = (-ceq[1].dynamic_forms[obs_fem_lbl].e_n_pb_inv @
ceq[1].dynamic_forms[obs_fem_lbl].matrices["E"][0][1])
A_modal_obs = (-ceq[2].dynamic_forms[obs_modal_lbl].e_n_pb_inv @
ceq[2].dynamic_forms[obs_modal_lbl].matrices["E"][0][1])
pprint()
pprint("Eigenvalues [{}, {}, {}]".format(sys_fem_lbl, obs_fem_lbl, obs_modal_lbl))
pprint([np.linalg.eigvals(A_) for A_ in (A_sys, A_obs, A_modal_obs)])
def find_eigenvalues(n):
def characteristic_equation(om):
return om * (np.sin(om) + param.m * om * np.cos(om))
eig_om = pi.find_roots(
characteristic_equation, np.linspace(0, np.pi * n, 5 * n), n)
eig_vals = list(sum([(1j * ev, -1j * ev) for ev in eig_om], ()))
return eig_om, sort_eigenvalues(eig_vals)
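# Example usage (illustrative only, exact values depend on param.m):
#   eig_om, eig_vals = find_eigenvalues(10)
# returns 10 roots om_k of om * (sin(om) + m * om * cos(om)) = 0 found on
# (0, 10 * pi), plus the corresponding eigenvalue pairs +-1j * om_k, sorted.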
def sort_eigenvalues(eigenvalues):
imag_ev = list()
real_ev = list()
for ev in eigenvalues:
if np.isclose(np.imag(ev), 0):
real_ev.append(0 if np.isclose(ev, 0) else np.real(ev))
else:
imag_ev.append(ev)
eig_vals = list(np.flipud(sorted(real_ev)))
for ev in np.array(imag_ev)[np.argsort(np.abs(np.imag(imag_ev)))]:
eig_vals.append(ev)
if len(eigenvalues) != len(eig_vals):
raise ValueError(
"Something went wrong! (only odd number of eigenvalues considered)"
)
return np.array(eig_vals)
class SwmPgAnimatedPlot(PgDataPlot):
"""
Animation for the string with mass example.
Compare with :py:class:`.PgAnimatedPlot`.
Args:
data ((iterable of) :py:class:`.EvalData`): results to animate
title (basestring): window title
        refresh_time (int): time in msec to refresh the window; must be
            greater than zero
        replay_gain (float): values above 1 accelerate and values below 1
            decelerate the playback; must be greater than zero
save_pics (bool):
labels:
Return:
"""
_res_path = "animation_output"
def __init__(self, data, title="", refresh_time=40, replay_gain=1, save_pics=False, create_video=False,
labels=None):
PgDataPlot.__init__(self, data)
self.time_data = [np.atleast_1d(data_set.input_data[0]) for data_set in self._data]
self.spatial_data = [np.atleast_1d(data_set.input_data[1]) for data_set in self._data]
self.state_data = [data_set.output_data for data_set in self._data]
self._time_stamp = time.strftime("%H:%M:%S")
self._pw = pg.plot(title="-".join([self._time_stamp, title, "at", str(replay_gain)]), labels=labels)
self._pw.addLegend()
self._pw.showGrid(x=True, y=True, alpha=1)
min_times = [min(data) for data in self.time_data]
max_times = [max(data) for data in self.time_data]
self._start_time = min(min_times)
self._end_time = max(max_times)
self._longest_idx = max_times.index(self._end_time)
assert refresh_time > 0
self._tr = refresh_time
assert replay_gain > 0
self._t_step = self._tr / 1000 * replay_gain
spat_min = np.min([np.min(data) for data in self.spatial_data])
spat_max = np.max([np.max(data) for data in self.spatial_data])
self._pw.setXRange(spat_min, spat_max)
state_min = np.min([np.min(data) for data in self.state_data])
state_max = np.max([np.max(data) for data in self.state_data])
self._pw.setYRange(state_min, state_max)
self.save_pics = save_pics
self.create_video = create_video and save_pics
self._export_complete = False
self._exported_files = []
if self.save_pics:
self._exporter = pg.exporters.ImageExporter(self._pw.plotItem)
self._exporter.parameters()['width'] = 1e3
from pyinduct.visualization import create_dir
picture_path = create_dir(self._res_path)
export_digits = int(np.abs(np.round(np.log10(self._end_time // self._t_step), 0)))
# ffmpeg uses c-style format strings
ff_name = "_".join(
[title.replace(" ", "_"), self._time_stamp.replace(":", "_"), "%0{}d".format(export_digits), ".png"])
file_name = "_".join(
[title.replace(" ", "_"), self._time_stamp.replace(":", "_"), "{" + ":0{}d".format(export_digits) + "}",
".png"])
self._ff_mask = os.sep.join([picture_path, ff_name])
self._file_mask = os.sep.join([picture_path, file_name])
self._file_name_counter = 0
self._time_text = pg.TextItem('t= 0')
self._pw.addItem(self._time_text)
self._time_text.setPos(.9 * spat_max, .9 * state_min)
self._plot_data_items = []
self._plot_indexes = []
cls = get_colors(len(self._data))
for idx, data_set in enumerate(self._data):
self._plot_indexes.append(0)
self._plot_data_items.append(pg.PlotDataItem(pen=pg.mkPen(cls[idx], width=2), name=data_set.name))
self._pw.addItem(self._plot_data_items[-1])
angles = np.linspace(0, 2 * np.pi, 1000)
self.x_circle = .01 * (spat_max - spat_min) * np.cos(angles)
self.y_circle = .01 * (state_max - state_min) * np.sin(angles)
for idx, data_set in enumerate(self._data):
self._plot_indexes.append(0)
self._plot_data_items.append(pg.PlotDataItem(pen=pg.mkPen(cls[idx], width=2)))
self._pw.addItem(self._plot_data_items[-1])
self._curr_frame = 0
self._t = self._start_time
self._timer = pg.QtCore.QTimer(self)
self._timer.timeout.connect(self._update_plot)
self._timer.start(self._tr)
def _update_plot(self):
"""
Update plot window.
"""
new_indexes = []
for idx, data_set in enumerate(self._data):
# find nearest time index (0th order interpolation)
t_idx = (np.abs(self.time_data[idx] - self._t)).argmin()
new_indexes.append(t_idx)
# TODO draw grey line if value is outdated
# update data
self._plot_data_items[idx].setData(x=self.spatial_data[idx], y=self.state_data[idx][t_idx])
# circles
self._plot_data_items[idx + len(self._data)].setData(
x=self.x_circle + self.spatial_data[idx][0],
y=self.y_circle + self.state_data[idx][t_idx][0])
self._time_text.setText('t= {0:.2f}'.format(self._t))
self._t += self._t_step
self._pw.setWindowTitle('t= {0:.2f}'.format(self._t))
if self._t > self._end_time:
self._t = self._start_time
if self.save_pics:
self._export_complete = True
print("saved pictures using mask: " + self._ff_mask)
if self.create_video:
from pyinduct.visualization import create_animation
create_animation(input_file_mask=self._ff_mask)
if self.save_pics and not self._export_complete:
if new_indexes != self._plot_indexes:
# only export a snapshot if the data changed
f_name = self._file_mask.format(self._file_name_counter)
self._exporter.export(f_name)
self._exported_files.append(f_name)
self._file_name_counter += 1
self._plot_indexes = new_indexes
@property
def exported_files(self):
if self._export_complete:
return self._exported_files
else:
return None
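# Hypothetical usage sketch (added for illustration, not part of this module);
# `eval_data` stands for one or more pyinduct EvalData results:
#
#     plot = SwmPgAnimatedPlot(eval_data, title="string with mass",
#                              replay_gain=2, save_pics=False)
#     # ... then start the Qt / pyqtgraph event loop to run the animation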
class Parameters:
def __init__(self):
pass
# parameters
param = Parameters()
param.m = 1
param.tau = 1
param.sigma = 1
obs_gain = Parameters()
obs_gain.k0 = 90
obs_gain.k1 = 100
obs_gain.alpha = 0
ctrl_gain = Parameters()
ctrl_gain.k0 = 2
ctrl_gain.k1 = 2
ctrl_gain.alpha = 0
# symbols
sym = Parameters()
sym.m, sym.lam, sym.tau, sym.om, sym.theta, sym.z, sym.t, sym.tau, sym.sigma = [
sp.Symbol(sym, real=True) for sym in (r"m", r"lambda", r"tau", r"omega", r"theta", r"z", r"t", r"u", r"sigma")]
sym.u, sym.yt = [sp.Function(f) for f in (r"\tilde{y}", r"tau")]
subs_list = [(sym.m, param.m)]
| gpl-3.0 |
AMechler/AliPhysics | PWGLF/NUCLEX/Nuclei/NucleiPbPb/macros_pp13TeV/CorrelationFraction.py | 19 | 2196 | import uproot
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
shift_list = [[1, -2], [2, -1], [3, 1], [4, 2]]
dcaxy_list = [[0, 1.0], [1, 1.4]]
dcaz_list = [[0, 0.5], [1, 0.75], [2, 1.25], [3, 1.50]]
pid_list = [[0, 3.25], [1, 3.5]]
tpc_list = [[0, 60], [1, 65], [2, 75], [3, 80]]
width_list = [[4, -2], [2, -1], [1, +1], [3, 2]]
cuts = {"shift": [shift_list, "Bin shift"], "dcaxy": [dcaxy_list, "$DCA_{xy}$ (mm)"], "dcaz": [dcaz_list, "$DCA_{z}$ (cm)"], "pid": [
pid_list, "$n\sigma_{TPC}$"], "tpc": [tpc_list, "TPC clusters"], "width": [width_list, "Bin width"]}
inFile = uproot.open("spectra.root")
normHist = inFile["nuclei_deuterons_/deuterons/9/Joined/JoinedSpectraM9"]
norm = normHist.values[:-3]
pt = [0.5 * (x + y) for x, y in zip(normHist.edges[:-4], normHist.edges[1:-3])]
colors = cm.rainbow(np.linspace(0, 1, len(norm)))
cMap = plt.get_cmap('jet')
cNorm = matplotlib.colors.Normalize(vmin=min(pt), vmax=max(pt))
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cMap)
for key, record in cuts.items():
x = np.array([])
y = np.array([])
fig,ax = plt.subplots()
for obj_list in record[0]:
obj_label=obj_list[0]
obj_val=obj_list[1]
values = inFile["nuclei_deuterons_{}{}/deuterons/9/Joined/JoinedSpectraM9".format(
key, obj_label)].values[:-3]
values = values / norm
x = np.append(x, np.array([obj_val for _ in range(0, len(values))]))
y = np.append(y, values)
plt.scatter(np.array([obj_val for _ in range(0, len(values))]),
values, color=scalarMap.to_rgba(pt), edgecolors='none')
scalarMap.set_array(pt)
fig.colorbar(scalarMap).set_label("$p_{T}$ (GeV/$c$)")
plt.ylabel("$S_{var}$ / $S_{ref}$")
plt.xlabel(record[1])
ax.tick_params(axis="y",direction="in")
ax.tick_params(axis="x",direction="in")
ax.xaxis.set_label_coords(0.9,-0.07)
ax.yaxis.set_label_coords(-0.115,0.9)
plt.text(0.5, 0.92, 'This work', ha='center', va='center', transform=ax.transAxes, fontweight='bold', fontsize=14)
print(np.corrcoef(x, y))
plt.savefig("{}.pdf".format(key))
# for name, keys in cuts:
| bsd-3-clause |
iut-ibk/DynaMind-ToolBox | DynaMind-BasicModules/scripts/Modules/plotraster.py | 2 | 2932 | """
@file
@author Christian Urich <[email protected]>
@version 1.0
@section LICENSE
This file is part of DynaMind
Copyright (C) 2012 Christian Urich
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from pydynamind import *
from numpy import *
from scipy import *
from matplotlib.pyplot import *
import os
import tempfile
class PlotRaster(Module):
def __init__(self):
Module.__init__(self)
self.vmin = 0
self.vmax = 0
self.createParameter("RasterDataName", STRING, "" )
self.RasterDataName = ""
self.vec = View("dummy", SUBSYSTEM, MODIFY)
self.offsetX = 0
self.OffsetY = 0
views = []
views.append(self.vec)
self.addData("System", views)
self.counter = 0
self.createParameter("Folder", STRING, "")
self.Folder = ""
def run(self):
fig = figure()
index = 1
f = fig.add_subplot(1,1 ,1)
r = self.getRasterData("System",View(self.RasterDataName, RASTERDATA, READ))
f = fig.add_subplot(1,index,1)
f.set_title(self.RasterDataName)
a = array([])
b = []
nameMap = ""
PlotStyle = ""
width = r.getWidth()
height = r.getHeight()
val = []
cval = array([])
a.resize(height, width)
for i in range(width):
for j in range(height):
a[j,i] = r.getValue(i,j) * 1
imshow(a, origin='lower', extent=[0,width,0,height], interpolation='nearest')
colorbar(ax = f, orientation='horizontal')
filename = "plot_"
filename+=str(self.counter).zfill(4)
filename+=".png"
savefig(str(self.Folder)+'/'+filename, dpi=720)
#fig.show()
close()
self.counter+=1
| gpl-2.0 |
eg-zhang/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
esa-as/2016-ml-contest | ar4/classification_utilities.py | 9 | 7383 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
def display_cm(cm, labels, hide_zeros=False,
display_metrics=False):
"""Display confusion matrix with labels, along with
metrics such as Recall, Precision and F1 score.
Based on Zach Guo's print_cm gist at
https://gist.github.com/zachguo/10296432
"""
precision = np.diagonal(cm)/cm.sum(axis=0).astype('float')
recall = np.diagonal(cm)/cm.sum(axis=1).astype('float')
F1 = 2 * (precision * recall) / (precision + recall)
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
F1[np.isnan(F1)] = 0
total_precision = np.sum(precision * cm.sum(axis=1)) / cm.sum(axis=(0,1))
total_recall = np.sum(recall * cm.sum(axis=1)) / cm.sum(axis=(0,1))
total_F1 = np.sum(F1 * cm.sum(axis=1)) / cm.sum(axis=(0,1))
#print total_precision
columnwidth = max([len(x) for x in labels]+[5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + " Pred", end=' ')
for label in labels:
print("%{0}s".format(columnwidth) % label, end=' ')
print("%{0}s".format(columnwidth) % 'Total')
print(" " + " True")
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=' ')
for j in range(len(labels)):
cell = "%{0}d".format(columnwidth) % cm[i, j]
if hide_zeros:
cell = cell if float(cm[i, j]) != 0 else empty_cell
print(cell, end=' ')
print("%{0}d".format(columnwidth) % sum(cm[i,:]))
if display_metrics:
print()
print("Precision", end=' ')
for j in range(len(labels)):
cell = "%{0}.2f".format(columnwidth) % precision[j]
print(cell, end=' ')
print("%{0}.2f".format(columnwidth) % total_precision)
print(" Recall", end=' ')
for j in range(len(labels)):
cell = "%{0}.2f".format(columnwidth) % recall[j]
print(cell, end=' ')
print("%{0}.2f".format(columnwidth) % total_recall)
print(" F1", end=' ')
for j in range(len(labels)):
cell = "%{0}.2f".format(columnwidth) % F1[j]
print(cell, end=' ')
print("%{0}.2f".format(columnwidth) % total_F1)
def display_adj_cm(
cm, labels, adjacent_facies, hide_zeros=False,
display_metrics=False):
"""This function displays a confusion matrix that counts
adjacent facies as correct.
"""
adj_cm = np.copy(cm)
for i in np.arange(0,cm.shape[0]):
for j in adjacent_facies[i]:
adj_cm[i][i] += adj_cm[i][j]
adj_cm[i][j] = 0.0
display_cm(adj_cm, labels, hide_zeros,
display_metrics)
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
def compare_facies_plot(logs, compadre, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[6])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im2, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-2):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[6].set_xlabel(compadre)
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
ax[6].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
| apache-2.0 |
choderalab/openpathsampling | openpathsampling/numerics/histogram.py | 1 | 26853 | import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import math
from .lookup_function import LookupFunction, VoxelLookupFunction
import collections
import warnings
from functools import reduce
class SparseHistogram(object):
"""
Base class for sparse-based histograms.
Parameters
----------
bin_widths : array-like
bin (voxel) size
left_bin_edges : array-like
lesser side of the bin (for each direction)
"""
def __init__(self, bin_widths, left_bin_edges):
self.bin_widths = np.array(bin_widths)
if left_bin_edges is None:
self.left_bin_edges = None
else:
self.left_bin_edges = np.array(left_bin_edges)
self.count = 0
self.name = None
self._histogram = None
def empty_copy(self):
"""Returns a new histogram with the same bin shape, but empty"""
return type(self)(self.bin_widths, self.left_bin_edges)
def histogram(self, data=None, weights=None):
"""Build the histogram.
Parameters
----------
data : list of list of floats
input data
weights : list of floats
weight for each input data point
Returns
-------
collection.Counter :
copy of the current counter
"""
if data is None and self._histogram is None:
raise RuntimeError("histogram() called without data!")
elif data is not None:
self._histogram = collections.Counter({})
return self.add_data_to_histogram(data, weights)
else:
return self._histogram.copy()
@staticmethod
def sum_histograms(hists):
# (w, r) = (hists[0].bin_width, hists[0].bin_range)
# newhist = Histogram(bin_width=w, bin_range=r)
newhist = hists[0].empty_copy()
newhist._histogram = collections.Counter({})
for hist in hists:
if not newhist.compare_parameters(hist):
raise RuntimeError
newhist.count += hist.count
newhist._histogram += hist._histogram
return newhist
def map_to_float_bins(self, trajectory):
return (np.asarray(trajectory) - self.left_bin_edges) / self.bin_widths
def map_to_bins(self, data):
"""
Parameters
----------
data : np.array
input data
Returns
-------
tuple:
the bin that the data represents
"""
return tuple(np.floor((data - self.left_bin_edges) / self.bin_widths))
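    # Illustrative note (added; not in the original source): with
    # left_bin_edges=(0.0, 0.0) and bin_widths=(0.5, 0.5), map_to_bins((1.2, 0.3))
    # returns (2.0, 0.0) -- the integer voxel indices, stored as floats.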
def add_data_to_histogram(self, data, weights=None):
"""Adds data to the internal histogram counter.
Parameters
----------
data : list or list of list
input data
weights : list or None
weight associated with each datapoint. Default `None` is same
weights for all
Returns
-------
collections.Counter :
copy of the current histogram counter
"""
if self._histogram is None:
return self.histogram(data, weights)
if weights is None:
weights = [1.0]*len(data)
part_hist = sum((collections.Counter({self.map_to_bins(d) : w})
for (d, w) in zip (data, weights)),
collections.Counter({}))
self._histogram += part_hist
self.count += len(data) if weights is None else sum(weights)
return self._histogram.copy()
@staticmethod
def _left_edge_to_bin_edge_type(left_bins, widths, bin_edge_type):
if bin_edge_type == "l":
return left_bins
elif bin_edge_type == "m":
return left_bins + 0.5 * widths
elif bin_edge_type == "r":
return left_bins + widths
elif bin_edge_type == "p":
pass # TODO: patches; give the range
else:
raise RuntimeError("Unknown bin edge type: " + str(bin_edge_type))
def xvals(self, bin_edge_type):
"""Position values for the bin
Parameters
----------
bin_edge_type : 'l' 'm', 'r', 'p'
type of values to return; 'l' gives left bin edges, 'r' gives
right bin edges, 'm' gives midpoint of the bin, and 'p' is not
implemented, but will give vertices of the patch for the bin
Returns
-------
np.array :
The values of the bin edges
"""
        int_bins = np.array(list(self._histogram.keys()))
left_bins = int_bins * self.bin_widths + self.left_bin_edges
return self._left_edge_to_bin_edge_type(left_bins, self.bin_widths,
bin_edge_type)
def __call__(self, bin_edge_type="m"):
return VoxelLookupFunction(left_bin_edges=self.left_bin_edges,
bin_widths=self.bin_widths,
counter=self._histogram)
def normalized(self, raw_probability=False, bin_edge="m"):
"""
Callable normalized version of the sparse histogram.
Parameters
----------
raw_probability : bool
if True, the voxel size is ignored and the sum of the counts
adds to one. If False (default), the sum of the counts times the
voxel volume adds to one.
bin_edge : string
not used; here for compatibility with 1D versions
Returns
-------
:class:`.VoxelLookupFunction`
callable version of the normalized histogram
"""
voxel_vol = reduce(lambda x, y: x.__mul__(y), self.bin_widths)
scale = voxel_vol if not raw_probability else 1.0
norm = 1.0 / (self.count * scale)
counter = collections.Counter({k : self._histogram[k] * norm
for k in self._histogram.keys()})
return VoxelLookupFunction(left_bin_edges=self.left_bin_edges,
bin_widths=self.bin_widths,
counter=counter)
def compare_parameters(self, other):
"""Test whether the other histogram has the same parameters.
Used to check whether we can simply combine these histograms.
Parameters
----------
other : :class:`.SparseHistogram`
histogram to compare with
Returns
-------
bool :
True if these were set up with equivalent parameters, False
otherwise
"""
# None returns false: use that as a quick test
        if other is None:
            return False
        if self.left_bin_edges is None or other.left_bin_edges is None:
            # this is to avoid a numpy warning on the next comparison
            return self.left_bin_edges is other.left_bin_edges
        if not np.array_equal(self.left_bin_edges, other.left_bin_edges):
            return False
        if not np.array_equal(self.bin_widths, other.bin_widths):
            return False
return True
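def _example_sparse_histogram_usage():
    # Hedged usage sketch (not part of the original module): bins 2D points
    # into voxels and returns the sparse Counter of per-voxel weights.
    sparse = SparseHistogram(bin_widths=(0.5, 0.5), left_bin_edges=(0.0, 0.0))
    counts = sparse.histogram([(0.1, 0.2), (0.6, 0.7), (0.65, 0.8)])
    # counts is a collections.Counter keyed by voxel index tuples, here
    # {(0.0, 0.0): 1.0, (1.0, 1.0): 2.0}
    return counts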
class Histogram(SparseHistogram):
"""Wrapper for numpy.histogram with additional conveniences.
In addition to the behavior in numpy.histogram, this provides a few
additional calculations, as well as behavior that allows for better
interactive use (tricks to assist caching by libraries using it, etc.)
"""
def __init__(self, n_bins=None, bin_width=None, bin_range=None):
"""Creates the parameters for the histogram.
Either `n_bins` or `bin_width` must be given. If `bin_width` is
used, then `bin_range` is required. If `n_bins` is used, then
`bin_range` is optional. `n_bins` overrides `bin_width`.
        If no options are given, the default is to use 20 bins and the
        range generated by np.histogram.
"""
# this is to compare whether another histogram had the same setup,
# and is useful for other programs that want to cache a histogram
self._inputs = [n_bins, bin_width, bin_range]
# regularize options
self.bin_width = None # if not set elsewhere
self.bin_range = None # if not set elsewhere
if bin_range is not None:
max_bin = max(bin_range)
min_bin = min(bin_range)
if bin_width is not None:
self.bin_width = bin_width
self.n_bins = int(math.ceil((max_bin-min_bin)/self.bin_width))
# if this isn't actually divisible, you'll get one extra bin
if n_bins is not None:
self.n_bins = n_bins
self.bin_width = (max_bin-min_bin)/(self.n_bins)
self.bins = [min_bin + self.bin_width*i
for i in range(self.n_bins+1)]
else:
if n_bins is not None:
self.n_bins = n_bins
else:
self.n_bins = 20 # default
self.bins = self.n_bins
try:
left_bin_edges = (self.bins[0],)
except TypeError:
left_bin_edges = None
super(Histogram, self).__init__(bin_widths=(self.bin_width,),
left_bin_edges=left_bin_edges)
def empty_copy(self):
return type(self)(bin_width=self.bin_width, bin_range=self.bin_range)
def histogram(self, data=None, weights=None):
"""Build the histogram based on `data`.
Note
----
Calling this with new data overwrites the previous histogram. This
is the expected behavior; in using this, you should check if the
histogram parameters have changed from a previous run (using
`compare_parameters`) and you should be aware whether your data has
changed. If you want to add data to the histogram, you should use
`add_data_to_histogram`.
"""
if self.left_bin_edges is not None:
return super(Histogram, self).histogram(data, weights)
if data is not None:
max_val = max(data)
min_val = min(data)
self.bin_width = (max_val-min_val)/self.bins
self.left_bin_edges = np.array((min_val,))
self.bin_widths = np.array((self.bin_width,))
return super(Histogram, self).histogram(data, weights)
def xvals(self, bin_edge_type="l"):
int_bins = np.array(list(self._histogram.keys()))[:,0]
# always include left_edge_bin as 0 point; always include 0 and
# greater bin values (but allow negative)
min_bin = min(min(int_bins), 0)
n_bins = max(int_bins) - min_bin + 1
width = self.bin_widths[0]
left_bins = (self.left_bin_edges[0] + np.arange(n_bins) * width)
return self._left_edge_to_bin_edge_type(left_bins, width,
bin_edge_type)
def __call__(self, bin_edge="m"):
"""Return copy of histogram if it has already been built"""
vals = self.xvals(bin_edge)
hist = self.histogram()
bins = sorted(hist.keys())
min_bin = min(bins[0][0], 0)
max_bin = bins[-1][0]
bin_range = range(int(min_bin), int(max_bin)+1)
hist_list = [hist[(b,)] for b in bin_range]
return LookupFunction(vals, hist_list)
def compare_parameters(self, other):
"""Return true if `other` has the same bin parameters as `self`.
Useful for checking whether a histogram needs to be rebuilt.
"""
if not super(Histogram, self).compare_parameters(other):
return False
if type(other.bins) is not int:
if type(self.bins) is int:
return False
for (t, b) in zip(self.bins, other.bins):
if t != b:
return False
else:
return self._inputs == other._inputs
return True
def _normalization(self):
"""Return normalization constant (integral over this histogram)."""
hist = self('l')
bin_edges = self.xvals('l')
dx = [bin_edges[i+1] - bin_edges[i] for i in range(len(bin_edges)-1)]
dx += [dx[-1]] # assume the "forever" bin is same as last limited
norm = np.dot(hist.values(), dx)
return norm
# Yes, the following could be cached. No, I don't think it is worth it.
# Keep in mind that we need a separate cache for each one that we build,
# and that typically it will take almost no time to build one of these
# (runtime in linear in number of histogram bins). Adding caching
# complicates the code for no real benefit (you're more likely to suffer
# from L2 cache misses than to get a speedup).
def normalized(self, raw_probability=False, bin_edge="m"):
"""Return normalized version of histogram.
By default (`raw_probability` false), this returns the histogram
normalized by its integral (according to rectangle-rule
integration). If `raw_probability` is true, this returns the
histogram normalized by the sum of the bin counts, with no
consideration of the bin widths.
"""
normed_hist = self() # returns a copy
nnorm = self._normalization() if not raw_probability else self.count
norm = 1.0/nnorm
normed_hist_list = [normed_hist(k) * norm for k in normed_hist.keys()]
xvals = self.xvals(bin_edge)
return LookupFunction(xvals, normed_hist_list)
def cumulative(self, maximum=1.0, bin_edge="r"):
"""Cumulative from the left: number of values less than bin value.
Use `maximum=None` to get the raw counts.
"""
cumul_hist = []
total = 0.0
hist = self(bin_edge)
for k in sorted(hist.keys()):
total += hist(k)
cumul_hist.append(total)
cumul_hist = np.array(cumul_hist)
if total == 0:
warnings.warn("No non-zero data in the histogram")
elif maximum is not None:
cumul_hist *= maximum / total
xvals = self.xvals(bin_edge)
return LookupFunction(xvals, cumul_hist)
def reverse_cumulative(self, maximum=1.0, bin_edge="l"):
"""Cumulative from the right: number of values greater than bin value.
Use `maximum=None` to get the raw counts.
"""
cumul_hist = []
total = 0.0
hist = self(bin_edge)
for k in reversed(sorted(hist.keys())):
total += hist(k)
cumul_hist.insert(0, total)
cumul_hist = np.array(cumul_hist)
if total == 0:
warnings.warn("No non-zero data in the histogram")
elif maximum is not None:
cumul_hist *= maximum / total
xvals = self.xvals(bin_edge)
return LookupFunction(xvals, cumul_hist)
def rebinned(self, scaling):
"""Redistributes histogram bins of width binwidth*scaling
Exact if scaling is an integer; otherwise uses the assumption that
original bins were uniformly distributed. Note that the original
data is not destroyed.
"""
#TODO
pass
def plot_bins(self, scaling=1.0):
"""Bins used in plotting. Scaling useful when plotting `rebinned`"""
# TODO: add scaling support
return self.bins[1:]
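def _example_histogram_usage():
    # Hedged usage sketch (not part of the original module): the numbers are
    # made up; it shows the calling pattern described in the docstrings above.
    hist = Histogram(n_bins=5, bin_range=(0.0, 1.0))
    hist.histogram([0.1, 0.4, 0.45, 0.8, 0.95])   # build the counts
    normed = hist.normalized()                    # LookupFunction, integral 1
    cumul = hist.cumulative(maximum=1.0)          # left-to-right cumulative curve
    return normed, cumul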
def histograms_to_pandas_dataframe(hists, fcn="histogram", fcn_args={}):
"""Converts histograms in hists to a pandas data frame"""
keys = None
hist_dict = {}
frames = []
for hist in hists:
# check that the keys match
if keys is None:
keys = hist.xvals()
for (t,b) in zip(keys, hist.xvals()):
if t != b:
raise Warning("Bins don't match up")
if hist.name is None:
hist.name = str(hists.index(hist))
hist_data = {
"histogram" : hist,
"normalized" : hist.normalized,
"reverse_cumulative" : hist.reverse_cumulative,
"cumulative" : hist.cumulative,
"rebinned" : hist.rebinned
}[fcn](**fcn_args).values()
bin_edge = {
"histogram" : "m",
"normalized" : "m",
"reverse_cumulative" : "l",
"cumulative" : "r"
}[fcn]
xvals = hist.xvals(bin_edge)
frames.append(pd.DataFrame({hist.name : hist_data}, index=xvals))
all_frames = pd.concat(frames, axis=1)
return all_frames.fillna(0.0)
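def _example_histograms_to_dataframe():
    # Hedged usage sketch (not part of the original module): two histograms
    # built with identical bin parameters can be combined column-wise.
    h1 = Histogram(n_bins=10, bin_range=(0.0, 1.0))
    h2 = Histogram(n_bins=10, bin_range=(0.0, 1.0))
    h1.histogram([0.1, 0.2, 0.2, 0.9]); h1.name = "A"
    h2.histogram([0.3, 0.35, 0.7]); h2.name = "B"
    return histograms_to_pandas_dataframe([h1, h2])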
def write_histograms(fname, hists):
"""Writes all histograms in list `hists` to file named `fname`
If the filename is the empty string, then output is to stdout.
Assumes that all files should have the same bins.
"""
pass
# TODO: might as well add a main function to this; read data / weight from
# stdin and output an appropriate histogram depending on some options. Then
# it is both a useful script and a library class!
class Histogrammer(object):
"""
Basically a dictionary to track what each histogram should be making.
"""
def __init__(self, f, f_args=None, hist_args=None):
self.f = f
self.f_args = f_args
self._hist_args = hist_args
self.empty_hist = Histogram(**self._hist_args)
@property
def hist_args(self):
return self._hist_args
@hist_args.setter
def hist_args(self, val):
self._hist_args = val
self.empty_hist = Histogram(**self._hist_args)
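def _example_histogrammer():
    # Hedged usage sketch (not part of the original module): Histogrammer
    # couples a mapping function with the Histogram parameters used to bin
    # that function's output; the lambda and values here are invented.
    hm = Histogrammer(f=lambda frame: frame['x'],
                      hist_args={'n_bins': 20, 'bin_range': (0.0, 1.0)})
    hist = Histogram(**hm.hist_args)
    hist.histogram([hm.f(frame) for frame in ({'x': 0.1}, {'x': 0.7})])
    return hist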
class HistogramPlotter2D(object):
"""
Convenience tool for plotting 2D histograms and plotting data atop them.
The difficulty is that matplotlib uses the row/column *numbers* of a
pandas.DataFrame as the actual internal axis. This class carries all the
information to properly plot things (even mapping to CVs, if the
histogram supports that).
The descriptions below will discuss "real space," "bin space," and
"frame space." Real space refers to the actual values of the input data.
Bin space refers to the bins that come out of that for histogramming
(made into continuous parameters). Frame space is bin space shifted such
that the lowest bin values are 0.
Parameters
----------
histogram : :class:`.SparseHistogram`
input histogram to plot
normed : bool
whether to normalize the histogram (using raw_probability=True)
xticklabels : list of float
the desired locations for plot xticks, in real space
yticklabels : list of float
the desired locations for plot yticks, in real space
xlim : 2-tuple of (float, float)
horizontal (x-value) range of (minimum, maximum) bounds for
displaying the plot
ylim : 2-tuple of (float, float)
vertical (y-value) range of (minimum, maximum) bounds for
displaying the plot
label_format : string
Python format-style string for formatting tick labels. Default is
'{:}'.
"""
def __init__(self, histogram, normed=True, xticklabels=None,
yticklabels=None, xlim=None, ylim=None,
label_format="{:}"):
self.histogram = histogram
self.normed = normed
self.xticklabels = xticklabels
self.yticklabels = yticklabels
self.xlim = xlim
self.ylim = ylim
self.label_format = label_format
self.xticks_, self.xlim_, self.yticks_, self.ylim_ = self.axes_setup(
xticklabels, yticklabels, xlim, ylim
)
def to_bins(self, alist, dof):
"""Convert real-space values to bin-space values for a given dof
Parameters
----------
alist : list of float
input in real-space
dof : integer (0 or 1)
degree of freedom; 0 is x, 1 is y
Returns
-------
list of float :
the outputs in bin-space
"""
left_edge = self.histogram.left_bin_edges[dof]
bin_width = self.histogram.bin_widths[dof]
result = None
if alist is not None:
result = (np.asarray(alist) - left_edge) / bin_width
return result
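    # Illustrative note (added; not in the original source): with
    # left_bin_edges[0] = 0.0 and bin_widths[0] = 0.1, to_bins([0.25, 0.5], dof=0)
    # returns array([2.5, 5.0]) -- real-space values re-expressed in bin units.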
def axis_input(self, hist, ticklabels, lims, dof):
"""Get ticks, range, and limits for a given DOF
Parameters
----------
hist : list of float
input data from the histogram (bin-space)
ticklabels : list of float or None
user-set tick labels for this DOF (real-space)
lims : 2-tuple (float, float) or None
user-set plot limits for this DOF
dof : integer (0 or 1)
degree of freedom; 0 is x, 1 is y
Returns
-------
ticks_ : list of float or None
user-set ticks in bin-space
range_ : list of float
range for the pandas.DataFrame (bin-space)
lims_ : 2-tuple (float, float)
range for plot visualization (bin-space)
"""
ticks_ = self.to_bins(ticklabels, dof)
lims_ = self.to_bins(lims, dof)
ticks = [] if ticks_ is None else list(ticks_)
lims = [] if lims_ is None else list(lims_)
range_ = (int(min(list(hist) + ticks + lims)),
int(max(list(hist) + ticks + lims)))
if lims_ is None:
lims_ = (0, range_[1] - range_[0])
else:
lims_ = (lims_[0] - range_[0], lims_[1] - range_[0])
return (ticks_, range_, lims_)
def axes_setup(self, xticklabels, yticklabels, xlim, ylim):
"""Set up both x-axis and y-axis for plotting.
Also sets self.xrange\_ and self.yrange\_, which are the (bin-space)
bounds for the pandas.DataFrame.
Parameters
----------
xticklabels : list of float
the desired locations for plot xticks, in real space
yticklabels : list of float
the desired locations for plot yticks, in real space
xlim : 2-tuple of (float, float)
horizontal (x-value) range of (minimum, maximum) bounds for
displaying the plot
ylim : 2-tuple of (float, float)
vertical (y-value) range of (minimum, maximum) bounds for
displaying the plot
Returns
-------
xticks_ : list of float or None
user-set xticks in bin-space
xlim_ : 2-tuple (float, float)
range in x for plot visualization (bin-space)
yticks_ : list of float or None
user-set yticks in bin-space
ylim_ : 2-tuple (float, float)
range in y for plot visualization (bin-space)
"""
if xticklabels is None:
xticklabels = self.xticklabels
if yticklabels is None:
yticklabels = self.yticklabels
if xlim is None:
xlim = self.xlim
if ylim is None:
ylim = self.ylim
x, y = list(zip(*self.histogram._histogram.keys()))
xticks_, xrange_, xlim_ = self.axis_input(x, xticklabels, xlim, dof=0)
yticks_, yrange_, ylim_ = self.axis_input(y, yticklabels, ylim, dof=1)
self.xrange_ = xrange_
self.yrange_ = yrange_
return (xticks_, xlim_, yticks_, ylim_)
def ticks_and_labels(self, ticks, ax, dof):
"""Obtain the plot ticks and tick labels for given dof.
Parameters
----------
ticks : list of float or None
user-set input (bin-space) for tick locations
ax : matplotlib.Axes
axes from the plot
dof : integer (0 or 1)
degree of freedom; 0 is x, 1 is y
Returns
-------
ticks : list of float
tick locations (bin-space, suitable for matplotlib)
labels : list of string
labels for the ticks
"""
if dof == 0:
ax_ticks = ax.get_xticks()
minval = self.xrange_[0]
bw = self.histogram.bin_widths[0]
edge = self.histogram.left_bin_edges[0]
elif dof == 1:
ax_ticks = ax.get_yticks()
minval = self.yrange_[0]
bw = self.histogram.bin_widths[1]
edge = self.histogram.left_bin_edges[1]
else: # pragma: no cover
raise RuntimeError("Bad DOF: "+ str(dof))
to_val = lambda n : (n + minval) * bw + edge
ticks = ticks if ticks is not None else ax_ticks
labels = [self.label_format.format(to_val(n)) for n in ticks]
return (ticks, labels)
def plot(self, normed=None, xticklabels=None, yticklabels=None,
xlim=None, ylim=None, **kwargs):
"""Plot the histogram.
Parameters
----------
normed : bool
whether to normalize the histogram (using raw_probability=True)
xticklabels : list of float
the desired locations for plot xticks, in real space
yticklabels : list of float
the desired locations for plot yticks, in real space
xlim : 2-tuple of (float, float)
horizontal (x-value) range of (minimum, maximum) bounds for
displaying the plot
ylim : 2-tuple of (float, float)
vertical (y-value) range of (minimum, maximum) bounds for
displaying the plot
kwargs :
additional arguments to pass to plt.pcolormesh
Returns
-------
PolyCollection :
return value of plt.pcolormesh
"""
if normed is None:
normed = self.normed
xticks_, xlim_, yticks_, ylim_ = self.axes_setup(
xticklabels, yticklabels, xlim, ylim
)
if normed:
hist_fcn = self.histogram.normalized(raw_probability=True)
else:
hist_fcn = self.histogram()
df = hist_fcn.df_2d(x_range=self.xrange_, y_range=self.yrange_)
self.df = df
mesh = plt.pcolormesh(df.fillna(0.0).transpose(), **kwargs)
(xticks, xlabels) = self.ticks_and_labels(xticks_, mesh.axes, dof=0)
(yticks, ylabels) = self.ticks_and_labels(yticks_, mesh.axes, dof=1)
mesh.axes.set_xticks(xticks)
mesh.axes.set_yticks(yticks)
mesh.axes.set_xticklabels(xlabels)
mesh.axes.set_yticklabels(ylabels)
plt.xlim(xlim_[0], xlim_[1])
plt.ylim(ylim_[0], ylim_[1])
plt.colorbar()
return mesh
def plot_trajectory(self, trajectory, *args, **kwargs):
"""Plot a trajectory (or CV trajectory) on the axes.
Additional arguments pass to plt.plot.
Parameters
----------
trajectory : :class:`.Trajectory` or list of 2-tuple
list to plot; paths.Trajectory allowed if the histogram can
convert it to CVs.
"""
x, y = list(zip(*self.histogram.map_to_float_bins(trajectory)))
px = np.asarray(x) - self.xrange_[0]
py = np.asarray(y) - self.yrange_[0]
plt.plot(px, py, *args, **kwargs)
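def _example_histogram_plotter_2d():
    # Hedged usage sketch (not part of the original module): builds a tiny 2D
    # sparse histogram and plots it with tick labels supplied in real space;
    # the data points and colormap are invented for illustration.
    sparse = SparseHistogram(bin_widths=(0.1, 0.1), left_bin_edges=(0.0, 0.0))
    sparse.histogram([(0.15, 0.25), (0.35, 0.45), (0.36, 0.44)])
    plotter = HistogramPlotter2D(sparse, xticklabels=[0.0, 0.2, 0.4],
                                 yticklabels=[0.0, 0.2, 0.4])
    return plotter.plot(cmap="viridis")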
| lgpl-2.1 |
AlexRobson/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the point positions.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it is a hierarchical clustering with a structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
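# Note (added; not in the original example): `connectivity` is a sparse
# n_samples x n_samples adjacency matrix; the ward linkage below may only
# merge clusters that are linked in this 10-nearest-neighbor graph.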
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
TheWeiTheTruthAndTheLight/senior-design | src/spark/main.py | 1 | 2814 | import json
import pickle
from pprint import pprint
import re
from os import listdir, SEEK_END
import datetime
import random
from sklearn.linear_model import LogisticRegression
import nlp
import ml
import numpy as np
from dvs import DictVectorizerPartial
import scipy
import pyspark
from pyspark.sql import SQLContext
def filterComments(generator):
import nlp
import ml
list_re = [
r"(\/sarcasm)",
r"(<\/?sarcasm>)",
r"(#sarcasm)",
r"(\s*\/s\s*$)"
]
sarcasm_re = re.compile('|'.join(list_re))
pop = []
for comment in generator:
try:
text = comment['body'].lower()
if 10 <= len(text) <= 120:
if sarcasm_re.search(text) is not None:
yield (True, ml.flattenDict(nlp.feature(comment['body'], nlp.cleanTokensReddit)))
else:
pop.append(comment['body'])
if len(pop) == 1800:
yield (False, ml.flattenDict(nlp.feature(random.choice(pop), nlp.cleanTokensReddit)))
pop = []
except:
pass
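def _example_filter_comments():
    # Hedged illustration (not part of the original script): assumes the
    # project-local nlp and ml modules are importable. Only the first fake
    # comment ends in '/s', so a single (True, features) pair is yielded.
    fake = [{'body': u"great idea, that will definitely work /s"},
            {'body': u"please see the documentation for details"}]
    return list(filterComments(iter(fake)))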
def getVocab(gen):
for sarc, features in gen:
for key in features:
yield key
def vectorize(gen, dv):
blocksize = 100000
sarclst = []
featlst = []
for sarc, features in gen:
sarclst.append(sarc)
featlst.append(features)
if len(sarclst) == blocksize:
yield (sarclst, dv.transform(featlst))
sarclst = []
featlst = []
yield (sarclst, dv.transform(featlst))
def train(gen):
for sarclst, matrix in gen:
y = np.array(sarclst)
X = matrix
result = ml.trainTest(X, y, classifiers=[LogisticRegression(n_jobs=-1)], reduce=0, splits=4, trainsize=0.8, testsize=0.2)
print result
yield result
def gerkin(gen):
for result in gen:
yield pickle.dumps(result)
if __name__=='__main__':
sc = pyspark.SparkContext()
sqlContext = SQLContext(sc)
df_rdd = sqlContext.read.format('json').load('/scratch/redditSarcasm/RC_2015-05.json')
print "Read df"
rdd = df_rdd.rdd
print "made rdd"
print "Reducing and transforming"
features = rdd.mapPartitions(filterComments)
print "Done reducing and transforming"
vocab = dict(features.mapPartitions(getVocab).distinct().zipWithIndex().collect())
print "Gathering Vocab"
dvp = DictVectorizerPartial(vocab=vocab)
vdvp = lambda gen: vectorize(gen, dvp)
csrs = features.mapPartitions(vdvp)
print "Collecting and saving X y"
trained = csrs.mapPartitions(train)
dill = trained.mapPartitions(gerkin)
dill.saveAsTextFile('/user/jfeinma00/logistic')
sc.parallelize([pickle.dumps(dvp)]).saveAsTextFile('/user/jfeinma00/dvp')
| mit |
rwgdrummer/maskgen | maskgen/ui/QAExtreme.py | 1 | 40824 | import matplotlib
from maskgen.maskgen_loader import MaskGenLoader
from maskgen.ui.semantic_frame import SemanticFrame
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
import logging
from matplotlib.figure import Figure
from Tkinter import *
import matplotlib.patches as mpatches
import ttk
import tkMessageBox
from PIL import ImageTk
from maskgen.support import getValue
from maskgen.tool_set import imageResizeRelative, openImage,get_username, GrayBlockOverlayGenerator, compose_overlay_name
import os
import numpy as np
import maskgen.qa_logic
from maskgen.video_tools import get_end_time_from_segment
import maskgen.tool_set
import random
import maskgen.scenario_model
from maskgen.services.probes import ProbeGenerator, DetermineTaskDesignation, fetch_qaData_designation, cleanup_temporary_files
import maskgen.validation
from maskgen.tool_set import openFile
import webbrowser
from maskgen.graph_meta_tools import MetaDataExtractor
class Chkbox:
def __init__(self, parent, dialog, label=None, command=None, value=False):
self.value = BooleanVar(value=value)
self.box = Checkbutton(parent, variable=self.value, command=dialog.check_ok if command is None else command)
self.label = label
def __nonzero__(self):
return self.value.get()
def set_value(self, value):
self.value.set(value=value)
def grid_info(self):
return self.box.grid_info()
def grid(self):
self.label.grid()
self.box.grid()
def grid_remove(self):
self.box.grid_remove()
self.label.grid_remove()
class CheckboxGroup:
"""
boxes: list of wrapped Checkboxes
    condition: either 'all' (every checkbox in the group must be true) or 'any' (any single true checkbox makes the group true).
"""
def __init__(self, boxes = [], condition = 'all'):
self.boxes = boxes
self.condition = condition
def __nonzero__(self):
if len(self.boxes) == 0:
return True
if self.condition == 'any':
return any(bool(value) for value in self.boxes)
else:
return all(bool(value) for value in self.boxes)
def hide_group(self):
for ck in self.boxes:
ck.grid_remove()
def show_group(self):
for ck in self.boxes:
ck.grid()
def grid_info(self, index = -1):
"""
Get the grid_info of the checkbox at the index. default is last index
:return:
"""
return self.boxes[index].grid_info() if len(self.boxes) > 0 else {}
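def _example_checkbox_group_logic():
    # Hedged sketch (not part of the original tool): an empty group passes,
    # condition='all' needs every box checked, condition='any' needs one.
    # _FakeBox stands in for Chkbox purely for illustration.
    class _FakeBox(object):
        def __init__(self, value):
            self.value = value
        def __nonzero__(self):
            return self.value
    group = CheckboxGroup(boxes=[_FakeBox(True), _FakeBox(False)], condition='any')
    return bool(group)  # True: at least one box in the group is checked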
class MannyPage(Frame):
"""
Displays mascot with instructions and status information on probe and QA page generation.
"""
checkboxes = CheckboxGroup()
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
def __init__(self, master):
Frame.__init__(self, master)
self.statusLabelText = StringVar()
self.statusLabelText.set('Probes Generating')
        self.heading = Label(self, text="Welcome to the QA Wizard. Press Next to begin the QA process or Quit to stop. This is "
                                         "Manny; he is here to help you analyze the journal. The tool is currently generating the probes. "
                                         "This could take a while. When the Next button is enabled, you may begin.",
wraplength=400)
self.heading.grid(column=0, row=0, rowspan=2, columnspan=2)
manny_color = maskgen.tool_set.get_icon('Manny_icon_color.jpg')
manny_mask = maskgen.tool_set.get_icon('Manny_icon_mask.jpg')
self.mannyFrame = Frame(self)
self.mannyFrame.grid(column=0, row=2, columnspan=2)
self.canvas = Canvas(self.mannyFrame, width=510, height=510)
self.canvas.pack()
manny_img = openImage(manny_color)
manny_img_mask = openImage(manny_mask).to_mask()
manny_img_mask = imageResizeRelative(manny_img_mask, (500, 500), manny_img_mask.size)
self.manny = ImageTk.PhotoImage(
imageResizeRelative(manny_img, (500, 500), manny_img.size).overlay(manny_img_mask,self.manny_colors[
random.randint(0, len(self.manny_colors) - 1)]).toPIL())
self.image_on_canvas = self.canvas.create_image(510 / 2, 510 / 2, image=self.manny, anchor=CENTER, tag='things')
self.statusLabelObject = Label(self, textvariable=self.statusLabelText)
self.statusLabelObject.grid(column=0, row=3, columnspan=2, sticky=E + W)
self.canvas.bind("<Double-Button-1>", master.help)
self.wquit = Button(self, text='Quit', command=master.exitProgram, width=20)
self.wquit.grid(column=0, row=4, sticky=W, padx=5, pady=5)
self.wnext = Button(self, text='Next', command=master.nex, state=DISABLED, width=20)
self.wnext.grid(column=1, row=4, sticky=E, padx=5, pady=5)
class FinalPage(Frame):
"""
Final QA page, handles comments, final approval.
"""
def __init__(self, master):
Frame.__init__(self, master)
row = 0
col = 0
self.infolabel = Label(self, justify=LEFT, text='QA Checklist:').grid(row=row, column=col)
row += 1
qa_list = [
            'Base and terminal node images should be the same format. If the base was a JPEG, the Create JPEG/TIFF option should be used as the last step.',
'All relevant semantic groups are identified.']
self.checkboxes = CheckboxGroup(boxes=[])
for q in qa_list:
box_label = Label(self, text=q, wraplength=600, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_state())
ck.box.grid(row=row, column=col)
ck.label.grid(row=row, column=col + 1, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[master.current_qa_page] = self.checkboxes
Label(self, text='QA Signoff: ').grid(row=row, column=col)
col += 1
self.reporterStr = StringVar()
self.reporterStr.set(get_username())
self.reporterEntry = Entry(self, textvar=self.reporterStr)
self.reporterEntry.grid(row=row, column=col, columnspan=3, sticky='W')
row += 2
col -= 1
self.acceptButton = Button(self, text='Accept', command=lambda: master.qa_done('yes'), width=15,
state=DISABLED)
self.acceptButton.grid(row=row, column=col + 2, columnspan=2, sticky='W')
self.rejectButton = Button(self, text='Reject', command=lambda: master.qa_done('no'), width=15)
self.rejectButton.grid(row=row, column=col + 1, columnspan=1, sticky='E')
self.previButton = Button(self, text='Previous', command=master.pre, width=15)
self.previButton.grid(row=row, column=col, columnspan=2, sticky='W')
row += 1
self.commentsLabel = Label(self, text='Comments: ')
self.commentsLabel.grid(row=row, column=col, columnspan=3)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 4, sticky=NS)
self.commentsBox = Text(self, height=5, width=100, yscrollcommand=textscroll.set, relief=SUNKEN)
self.commentsBox.grid(row=row, column=col, padx=5, pady=5, columnspan=3, sticky=NSEW)
textscroll.config(command=self.commentsBox.yview)
currentComment = master.parent.scModel.getProjectData('qacomment')
self.commentsBox.insert(END, currentComment) if currentComment is not None else ''
class QAPage(Frame):
"""
A standard QA Page, allows review and user validation of probe spatial, temporal aspects
"""
#TODO: Refactor to put page data with the page.
"""
subplots = []
pltdata = []
successIcon = None
displays = []
pathboxes = []
"""
def __init__(self, master, link):
Frame.__init__(self, master=master)
self.master = master
self.link = link
self.checkboxes = CheckboxGroup(boxes=[])
#Find this probe- could probably do this elsewhere and pass it in.
self.edgeTuple = tuple(link.split("<-"))
if len(self.edgeTuple) < 2:
self.finalNodeName = link.split("->")[1]
self.edgeTuple = tuple(link.split("->"))
else:
self.finalNodeName = None
if (len(link.split('->'))>1):
probe = [probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.finalNodeId in master.lookup[self.edgeTuple[1]]][0]
else:
probe = \
[probe for probe in master.probes if
probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.donorBaseNodeId in
master.lookup[
self.edgeTuple[1]]][0]
self.probe = probe
iFrame = Frame(self)
c = Canvas(iFrame, width=35, height=35)
c.pack()
#Success Icon
img = openImage(maskgen.tool_set.get_icon('RedX.png') if probe.failure else maskgen.tool_set.get_icon('check.png'))
self.successIcon = ImageTk.PhotoImage(imageResizeRelative(img, (30, 30), img.size).toPIL())
c.create_image(15, 15, image=self.successIcon, anchor=CENTER, tag='things')
#Layout
row = 0
col = 0
self.optionsLabel = Label(self, text=self.link, font=(None, 10))
self.optionsLabel.grid(row=row, columnspan=3, sticky='EW', padx=(40, 0), pady=10)
iFrame.grid(column=0, row=0, columnspan=1, sticky=W)
row += 1
self.operationVar = StringVar(value="Operation [ Semantic Groups ]:")
self.operationLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
self.semanticFrame = SemanticFrame(self)
self.semanticFrame.grid(row=row + 1, column=0, columnspan=2, sticky=N + W, rowspan=1, pady=10)
row += 2
#cImageFrame is used for plot, image and overlay
self.cImgFrame = ttk.Notebook(self)
self.cImgFrame.bind('<<NotebookTabChanged>>', lambda a: self.frameMove())
self.cImgFrame.grid(row=row, rowspan=8)
self.descriptionVar = StringVar()
self.descriptionLabel = Label(self, textvariable=self.operationVar, justify=LEFT)
row += 8
self.operationLabel.grid(row=row, columnspan=3, sticky='W', padx=10)
row += 1
textscroll = Scrollbar(self)
textscroll.grid(row=row, column=col + 1, sticky=NS)
self.commentBox = Text(self, height=5, width=80, yscrollcommand=textscroll.set, relief=SUNKEN)
self.master.commentsBoxes[self.link] = self.commentBox
self.commentBox.grid(row=row, column=col, padx=5, pady=5, columnspan=1, rowspan=2, sticky=NSEW)
textscroll.config(command=self.commentBox.yview)
col = 3
row = 0
scroll = Scrollbar(self)
scroll.grid(row=row, column=col + 2, rowspan=5, columnspan=1, sticky=NS)
self.pathList = Listbox(self, width=30, yscrollcommand=scroll.set, selectmode=EXTENDED, exportselection=0)
self.pathList.grid(row=row, column=col - 1, rowspan=5, columnspan=3, padx=(30, 10), pady=(20, 20))
self.master.pathboxes[self] = self.semanticFrame.getListbox()
scroll.config(command=self.pathList.yview)
self.transitionVar = StringVar()
edge = master.scModel.getGraph().get_edge(probe.edgeId[0], probe.edgeId[1])
self.operationVar.set(self.operationVar.get() + master._compose_label(edge))
master.edges[self] = [edge, self.semanticFrame.getListbox()]
for sg in edge['semanticGroups'] if 'semanticGroups' in edge else []:
self.semanticFrame.insertListbox(ANCHOR, sg)
operation = master.scModel.getGroupOperationLoader().getOperationWithGroups(edge['op'])
#QA checkboxes
if operation.qaList is not None:
args = getValue(edge, 'arguments', {})
self.curOpList = [x for x in operation.qaList]
for item_pos in range(len(self.curOpList)):
item = self.curOpList[item_pos]
try:
self.curOpList[item_pos] = item.format(**args)
except:
pass
else:
self.curOpList = []
row += 5
if self.curOpList is None:
master.qaData.set_qalink_status(self.link, 'yes')
for q in self.curOpList:
box_label = Label(self, text=q, wraplength=250, justify=LEFT)
ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_qalink_status(link=link))
ck.box.grid(row=row, column=col - 1)
ck.label.grid(row=row, column=col, columnspan=4, sticky='W')
self.checkboxes.boxes.append(ck)
row += 1
master.checkboxes[self] = self.checkboxes
# Main Features- load the overlay for images, load plot graph & overlay page for videos
if ('<-' in self.link and probe.donorVideoSegments is None) or probe.targetVideoSegments is None:
self.load_overlay(initialize=True)
else:
self.transitionString(None)
self.setUpFrames()
#Comment section
currentComment = master.qaData.get_qalink_caption(self.link)
self.commentBox.delete(1.0, END)
self.commentBox.insert(END, currentComment if currentComment is not None else '')
#Navigation Buttons
self.acceptButton = Button(self, text='Next', command=master.nex, width=15)
self.acceptButton.grid(row=12, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevButton = Button(self, text='Previous', command=master.pre, width=15)
self.prevButton.grid(row=12, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
self.acceptnButton = Button(self, text='Next Unchecked', command=master.nexCheck, width=15)
self.acceptnButton.grid(row=13, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))
self.prevnButton = Button(self, text='Previous Unchecked', command=master.preCheck, width=15)
self.prevnButton.grid(row=13, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))
row = 14
#Progress Bar
pb = ttk.Progressbar(self, orient='horizontal', mode='determinate', maximum=100.0001)
pb.grid(row=row, column=0, sticky=EW, columnspan=8)
pb.step(master.progress * 100)
master.progressBars.append(pb)
def setUpFrames(self):
"""
Lays out inner display for video temporal and spatial review
:return:
"""
displays = [TemporalReviewDisplay(self)]
if any(segment.filename != None for segment in self.probe.targetVideoSegments):
displays.append(SpatialReviewDisplay(self))
self.checkboxes.boxes.append(CheckboxGroup(boxes=[d.checkbox for d in displays], condition='any'))
self.master.pageDisplays[self] = [0, displays]
def _add_to_listBox(self, box, string):
if len(string) < 20:
box.insert(END, string)
return 1
box.insert(END, string[0:15]+"...")
box.insert(END, " " + string[max(15-int(len(string)),-10):])
return 2
def transitionString(self, probeList):
tab = " "
current = 0
c = 0
if self.finalNodeName == None:
self._add_to_listBox(self.pathList, self.edgeTuple[1])
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, tab + "Donor")
self.pathList.insert(END, 2*tab + "|")
self.pathList.insert(END, 2*tab + "V")
self._add_to_listBox(self.pathList, self.edgeTuple[0])
self.pathList.select_set(6)
return self.edgeTuple[0] + "\n|Donor|\nV\n" + self.edgeTuple[1]
self._add_to_listBox(self.pathList,self.master.backs[self.finalNodeName][0].start)
for p in self.master.backs[self.finalNodeName]:
edge = self.master.scModel.getGraph().get_edge(p.start, p.end)
self.pathList.insert(END, 2 * tab + "|")
c += self._add_to_listBox(self.pathList, edge['op'])
self.pathList.insert(END, 2 * tab + "|")
self.pathList.insert(END, 2 * tab + "V")
c += 3
c += self._add_to_listBox(self.pathList, self.master.getFileNameForNode(p.end))
if self.master.getFileNameForNode(p.end) == self.edgeTuple[0]:
current = c
self.pathList.selection_set(current)
self.pathList.see(max(0,current-5))
return ""
def load_overlay(self, initialize):
"""
Lays out display for spatial overlay for image probes
:param initialize:
:return:
"""
edgeTuple = self.edgeTuple
message = 'final image'
if (len(self.link.split('->')) > 1):
probe = [probe for probe in self.master.probes if
probe.edgeId[1] in self.master.lookup[self.edgeTuple[0]] and probe.finalNodeId in self.master.lookup[
self.edgeTuple[1]]][0]
n = self.master.scModel.G.get_node(probe.finalNodeId)
finalFile = os.path.join(self.master.scModel.G.dir,
self.master.scModel.G.get_node(probe.finalNodeId)['file'])
final = openImage(finalFile)
finalResized = imageResizeRelative(final, (500, 500), final.size)
imResized = imageResizeRelative(probe.targetMaskImage, (500, 500),
probe.targetMaskImage.size if probe.targetMaskImage is not None else finalResized.size)
else:
message = 'donor'
probe = \
[probe for probe in self.master.probes if probe.edgeId[1] in self.master.lookup[edgeTuple[0]] and probe.donorBaseNodeId in self.master.lookup[edgeTuple[1]]][0]
final, final_file = self.master.scModel.G.get_image(probe.donorBaseNodeId)
finalResized = imageResizeRelative(final, (500, 500), final.size)
imResized = imageResizeRelative(probe.donorMaskImage, (500, 500),
probe.donorMaskImage.size if probe.donorMaskImage is not None else finalResized.size)
edge = self.master.scModel.getGraph().get_edge(probe.edgeId[0],probe.edgeId[1])
if initialize is True:
self.c = Canvas(self.cImgFrame, width=510, height=510)
self.c.pack()
self.transitionString(None)
try:
finalResized = finalResized.overlay(imResized)
except IndexError:
tex = self.c.create_text(250,250,width=400,font=("Courier", 20))
self.c.itemconfig(tex, text="The mask of link {} did not match the size of the {}.".format(self.link, message))
return
self.master.photos[self.link] = ImageTk.PhotoImage(finalResized.toPIL())
self.image_on_canvas = self.c.create_image(255, 255, image=self.master.photos[self.link], anchor=CENTER, tag='imgc')
def frameMove(self):
"""
change pages on inner display for videos
:return:
"""
if self in self.master.pageDisplays:
displays = self.master.pageDisplays[self][1]
d_index = self.cImgFrame.index('current')
displays[d_index].checkbox.grid()
for display in displays:
if display != displays[d_index]:
display.checkbox.grid_remove()
def scrollplt(self, *args):
"""
Handle scrolling function on temporal review graph.
:param args:
:return:
"""
if (args[0] == 'moveto'):
na = self.master.pltdata[self]
end = na[-1]
total = end[3]-end[2] + 20000
curframe = self.master.subplots[self].get_children()[1].xaxis.get_view_interval()
space = curframe[1]-curframe[0]
total *= float(args[1])
self.master.subplots[self].get_children()[1].xaxis.set_view_interval(total, total + space, ignore=True)
self.master.subplots[self].canvas.draw()
elif (args[0] == 'scroll'):
self.master.subplots[self].get_children()[1].xaxis.pan(int(args[1]))
self.master.subplots[self].canvas.draw()
def cache_designation(self):
"""
Cache the QA validation of probe designation.
:return:
"""
self.master.check_ok()
displays = self.master.pageDisplays[self][1] if self in self.master.pageDisplays else []
if len(displays) > 0:
validation = {'temporal': bool(displays[0].checkbox), 'spatial': bool(displays[1].checkbox) if len(displays) > 1 else False}
elegibility = [key for key in validation.keys() if validation[key] == True]
designation = '-'.join(elegibility) if len(elegibility) else 'detect'
else:
designation = self.probe.taskDesignation
self.master.qaData.set_qalink_designation(self.link, designation)
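def _example_designation_from_checks():
    # Hedged sketch (not part of the original tool): mirrors how
    # cache_designation above composes the task designation from the
    # temporal/spatial review checkboxes.
    validation = {'temporal': True, 'spatial': False}
    elegibility = [key for key in validation.keys() if validation[key]]
    return '-'.join(elegibility) if len(elegibility) else 'detect'  # 'temporal'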
class DummyPage(Frame):
def __init__(self, master, labeltext = ''):
Frame.__init__(self, master=master)
self.mainlabel = Label(self, text= labeltext)
self.mainlabel.pack()
self.nextButton = Button(self, text='NEXT', command=master.nex)
self.nextButton.pack()
class SpatialReviewDisplay(Frame):
"""
The spatial review display for video
"""
def __init__(self, page):
Frame.__init__(self, master=page.cImgFrame, height=500,width=50)
page.cImgFrame.add(self, text='Spatial')
self.dialog = self.winfo_toplevel()
#Add Checkbox for spatial review
checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
spatial_box_label = Label(master=page, text='Spatial Overlay Correct?', wraplength=250, justify=LEFT)
self.checkbox = Chkbox(parent=page, dialog=page.master, label=spatial_box_label, command=page.cache_designation,
value=page.master.qaData.get_qalink_designation(page.link) is not None)
self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col -1)
self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
if (len(page.link.split('->')) > 1):
probe = [probe for probe in page.master.probes if
probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.finalNodeId in
page.master.lookup[page.edgeTuple[1]]][0]
else:
probe = \
[probe for probe in page.master.probes if
probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.donorBaseNodeId in
page.master.lookup[
page.edgeTuple[1]]][0]
if probe.targetVideoSegments is not None:
to = os.path.join(self.dialog.scModel.get_dir(),probe.finalImageFileName)
overlay_file = compose_overlay_name(target_file=to, link=page.link)
total_range = (probe.targetVideoSegments[0].starttime/1000, probe.targetVideoSegments[-1].endtime/1000)
self.buttonText = StringVar()
self.buttonText.set(value=('PLAY: ' if os.path.exists(overlay_file) else 'GENERATE: ') + os.path.split(overlay_file)[1])
self.playbutton = Button(master=self, textvariable=self.buttonText,
command=lambda: self.openOverlay(probe=probe,
target_file=to,
overlay_path=overlay_file))
self.playbutton.grid(row=0, column=0, columnspan=2, sticky='W')
self.range_label = Label(master=self, text='Range: ' + '{:.2f}'.format(total_range[0]) + 's - ' + '{:.2f}'.format(total_range[1]) + 's')
self.range_label.grid(row=0, column= 3, columnspan = 1, sticky='W')
def openOverlay(self, probe=None, target_file = '', overlay_path=''):
if not os.path.exists(overlay_path):
GrayBlockOverlayGenerator(locator=self.dialog.meta_extractor.getMetaDataLocator(probe.edgeId[0]),
segments=probe.targetVideoSegments,
target_file=target_file, output_file=overlay_path).generate()
self.buttonText.set('PLAY: ' + os.path.split(overlay_path)[1])
openFile(overlay_path)
class TemporalReviewDisplay(Frame):
"""
The temporal review display for video
"""
def __init__(self, page):
Frame.__init__(self, master=page.cImgFrame)
page.cImgFrame.add(self, text='Temporal')
# Add Checkbox for spatial review
checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}
chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5
chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4
temporal_box_label = Label(master=page, text='Temporal data correct?', wraplength=250, justify=LEFT)
self.checkbox = Chkbox(parent=page, dialog=page.master, label=temporal_box_label, command=page.cache_designation,
value=page.master.qaData.get_qalink_designation(page.link) is not None)
self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col - 1)
self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')
self.checkbox.grid_remove() #hide for now, Will be gridded by the frameMove function
ps = [mpatches.Patch(color="red", label="Target Video"),
mpatches.Patch(color="blue", label="Current Manipulations"),
mpatches.Patch(color="green", label="Other Manipulations")]
data = []
f = Figure(figsize=(6, 4), dpi=100)
subplot = f.add_subplot(111)
subplot.legend(handles=ps, loc=8)
prolist = []
maxtsec = 0
for probe in page.master.probes:
maxtsec = max(maxtsec, probe.max_time())
if (page.finalNodeName == None):
if probe.donorBaseNodeId is not None and page.master.getFileNameForNode(probe.donorBaseNodeId) == \
page.edgeTuple[
1]:
prolist.append(probe)
else:
if (page.master.getFileNameForNode(probe.finalNodeId) == page.edgeTuple[1]):
prolist.append(probe)
try:
tsec = get_end_time_from_segment(
page.master.meta_extractor.getMetaDataLocator(page.master.lookup[page.edgeTuple[1]][0]).getMaskSetForEntireVideo(
media_types=probe.media_types())[0]) / 1000.0
except Exception as ex:
logging.getLogger("maskgen").error(ex.message)
logging.getLogger("maskgen").error(
"{} Duration could not be found the length displayed in the graph is incorrect".format(
page.edgeTuple[1]))
tsec = maxtsec
ytics = []
ytic_lbl = []
count = 0
high = 0
low = tsec * 1000 + 20000
for probe in prolist:
count += 1
col = 2
cur = False
if (probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]]):
col = 1
cur = True
if page.finalNodeName == None:
for mvs in probe.donorVideoSegments if probe.donorVideoSegments is not None else []:
data.append([count, col, mvs.starttime, mvs.endtime])
if cur:
high = max(high, mvs.endtime)
low = min(low, mvs.starttime)
subplot.text(mvs.starttime - 100, count - 0.5, "F:" + str(int(mvs.startframe)),
{'size': 10})
subplot.text(mvs.endtime + 100, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
subplot.text(mvs.starttime - 100, count - 0.20, "T:" + str(int(mvs.starttime)),
{'size': 10})
subplot.text(mvs.endtime + 100, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
else:
for mvs in probe.targetVideoSegments if probe.targetVideoSegments is not None else []:
data.append([count, col, mvs.starttime, mvs.endtime])
if cur:
high = max(high, mvs.endtime)
low = min(low, mvs.starttime)
subplot.text(mvs.starttime, count - 0.5, "F:" + str(int(mvs.startframe)), {'size': 10})
subplot.text(mvs.endtime, count - 0.5, "F:" + str(int(mvs.endframe)), {'size': 10})
subplot.text(mvs.starttime, count - 0.20, "T:" + str(int(mvs.starttime)), {'size': 10})
subplot.text(mvs.endtime, count - 0.20, "T:" + str(int(mvs.endtime)), {'size': 10})
ytics.append(count)
ytic_lbl.append(str(page.master.abreive(probe.edgeId[0])))
color_mapper = np.vectorize(lambda x: {0: 'red', 1: 'blue', 2: 'green'}.get(x))
data.append([count + 1, 0, 0.0, tsec * 1000.0])
ytics.append(count + 1)
ytic_lbl.append(page.master.abreive(page.edgeTuple[1]))
numpy_array = np.array(data)
subplot.hlines(numpy_array[:, 0], numpy_array[:, 2], numpy_array[:, 3], color_mapper(numpy_array[:, 1]),
linewidth=10)
subplot.set_yticks(ytics)
subplot.set_yticklabels(ytic_lbl)
subplot.set_xlabel('Time in Milliseconds')
subplot.grid()
i = subplot.yaxis.get_view_interval()
if (i[1] - i[0] < 10):
i[0] = i[1] - 8
subplot.yaxis.set_view_interval(i[0], i[1])
i = subplot.xaxis.get_view_interval()
if (i[1] - i[0] > 2000):
i[0] = low - 1000
i[1] = high + 1000
subplot.xaxis.set_view_interval(i[0], i[1])
page.master.pltdata[page] = numpy_array
canvas = Canvas(self, height=50, width=50)
imscroll = Scrollbar(self, orient=HORIZONTAL)
imscroll.grid(row=1, column=0, sticky=EW)
imscroll.config(command=page.scrollplt)
fcanvas = FigureCanvasTkAgg(f, master=canvas)
fcanvas.show()
fcanvas.get_tk_widget().grid(row=0, column=0)
fcanvas._tkcanvas.grid(row=0, column=0)
canvas.grid(row=0, column=0)
canvas.config(height=50, width=50)
page.master.subplots[page] = f
class QAProjectDialog(Toplevel):
"""
Host window for QA pages
"""
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]
def __init__(self, parent):
self.parent = parent
self.scModel = parent.scModel
self.meta_extractor = MetaDataExtractor(parent.scModel.getGraph())
self.probes = None
Toplevel.__init__(self, parent)
self.type = self.parent.scModel.getEndType()
self.pages = []
self.current_qa_page = None
self.checkboxes = {} #Checkboxes, keyed by page
self.backs = {}
self.lookup = {}
self.subplots ={}
self.pltdata = {}
self.backsProbes={}
self.photos = {}
self.commentsBoxes = {}
self.edges = {}
self.qaList = []
self.pathboxes = {}
self.qaData = maskgen.qa_logic.ValidationData(self.scModel)
self.resizable(width=False, height=False)
self.progressBars = []
self.narnia = {}
self.pageDisplays = {} #Frames that go inside pages, keyed by page.
self.valid = False
self.mannypage = MannyPage(self)
self.switch_frame(self.mannypage)
self.lastpage = None #Assigned in generate Pages
self.pages.append(self.mannypage)
self.getProbes()
if self.probes is None:
self.mannypage.statusLabelText.set('Probe Generation failed. Please consult logs for more details.')
self.parent.update()
else:
self.errors = [p for p in self.probes if p.failure]
if len(self.errors) > 0:
self.mannypage.statusLabelText.set('Probes Complete with errors. Generating Preview Pages.')
else:
self.mannypage.statusLabelText.set('Probes Complete. Generating Preview Pages.')
self.generate_pages()
def getProbes(self):
try:
generator = ProbeGenerator(
scModel=self.scModel,
processors=[
DetermineTaskDesignation(
scModel=self.scModel,
inputFunction=fetch_qaData_designation)])
self.probes = generator(saveTargets=False, keepFailures=True)
except Exception as e:
logging.getLogger('maskgen').error(str(e))
self.probes = None
def getFileNameForNode(self, nodeid):
try:
fn = self.scModel.getFileName(nodeid)
if fn not in self.lookup:
self.lookup[fn] = []
if nodeid not in self.lookup[fn]:
self.lookup[fn].append(nodeid)
except TypeError:
fn = None
logging.getLogger('maskgen').warn("Unable to locate File for node with Id {}".format(nodeid))
return fn
def pre(self):
self.move(-1,False)
def nex(self):
self.move(1, False)
def exitProgram(self):
self.destroy()
cleanup_temporary_files(probes=self.probes, scModel=self.scModel)
def help(self,event):
URL = MaskGenLoader.get_key("apiurl")[:-3] + "journal"
webbrowser.open_new(URL)
def generate_pages(self):
self.crit_links = ['->'.join([self.getFileNameForNode(p.edgeId[1]), self.getFileNameForNode(p.finalNodeId)]) for
p in self.probes] if self.probes else []
self.crit_links = list(set(self.crit_links))
self.finNodes = []
for x in range(0, len(self.crit_links)):
for y in range(x, len(self.crit_links)):
link1 = self.crit_links[x]
link2 = self.crit_links[y]
fin1 = link1.split("->")[1]
fin2 = link2.split("->")[1]
self.finNodes.append(fin2)
if (fin1 > fin2):
self.crit_links[x] = self.crit_links[y]
self.crit_links[y] = link1
self.finNodes = list(set(self.finNodes))
for end in self.finNodes:
for node in self.lookup[end]:
if node in self.scModel.finalNodes():
break
self.backs[end] = []
next = self.getPredNode(node)
while next != None:
node = next.start
self.backs[end].append(next)
next = self.getPredNode(node)
self.backs[end].reverse()
donors = ['<-'.join([self.getFileNameForNode(p.edgeId[1]), self.getFileNameForNode(p.donorBaseNodeId)]) for p in
self.probes if
p.donorMaskImage is not None or p.donorVideoSegments is not None] if self.probes else []
donors = set(sorted(donors))
self.crit_links.extend([x for x in donors])
count = 0.0
for k in self.qaData.keys():
count += 1 if self.qaData.get_qalink_status(k) == 'yes' else 0
self.progress = count / len(self.crit_links) if len(self.crit_links) != 0 else 0.99999
count = 1
for link in self.crit_links:
self.pages.append(QAPage(master=self, link=link))
count += 1
self.lastpage = FinalPage(self)
self.pages.append(self.lastpage)
self.mannypage.statusLabelText.set('Preview Pages Complete. Press Next to Continue.')
self.mannypage.wnext.config(state=NORMAL)
def validategoodtimes(self):
v = self.scModel.validate()
if maskgen.validation.core.hasErrorMessages(v, lambda x: True):
self.valid = False
tkMessageBox.showerror("Validation Errors!","It seems this journal has unresolved validation errors. "
"Please address these and try again. Your QA progress will be saved.")
else:
self.valid = True
self.check_ok()
def abreive(self,str):
if (len(str)>10):
return(str[:5]+ "...\n" + str[-6:])
else:
return str
def _add_to_listBox(self, box, string):
if len(string) < 20:
box.insert(END, string)
return 1
box.insert(END, string[0:15]+"...")
box.insert(END, " " + string[max(15-int(len(string)),-10):])
return 2
def _compose_label(self,edge):
op = edge['op']
if 'semanticGroups' in edge and edge['semanticGroups'] is not None:
groups = edge['semanticGroups']
op += ' [' + ', '.join(groups) + ']'
self.descriptionVar = edge['description']
return op
def nexCheck(self):
self.move(1,True)
def preCheck(self):
self.move(-1,True)
def switch_frame(self, frame):
if self.current_qa_page != None:
self.current_qa_page.grid_forget()
self.current_qa_page = frame
self.current_qa_page.grid()
def move(self, dir, checked):
if self.current_qa_page in self.edges.keys():
self.edges[self.current_qa_page][0]['semanticGroups'] = self.edges[self.current_qa_page][1].get(0, END)
finish = True
if self.current_qa_page in self.checkboxes.keys():
for box in self.checkboxes[self.current_qa_page].boxes:
if bool(box) is False:
finish = False
break
#caching in qaData
ind = self.pages.index(self.current_qa_page)
step = 0
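        # `step` accumulates the percentage change (positive or negative) that is
        # applied to every progress bar once this link's QA status is updated below.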
        if 0 <= ind - 1 < len(self.crit_links):
if finish and self.crit_links[ind-1] in self.qaData.keys():
if self.qaData.get_qalink_status(self.crit_links[ind-1]) == 'no':
step += 1.0/len(self.crit_links)*100
self.qaData.set_qalink_status(self.crit_links[ind-1],'yes')
self.qaData.set_qalink_caption(self.crit_links[ind-1],self.commentsBoxes[self.crit_links[ind-1]].get(1.0, END).strip())
self.current_qa_page.cache_designation()
if not finish:
if self.qaData.get_qalink_status(self.crit_links[ind-1]) == 'yes':
step += -1.0/len(self.crit_links)*100
self.qaData.set_qalink_status(self.crit_links[ind - 1], 'no')
self.qaData.set_qalink_caption(self.crit_links[ind - 1], self.commentsBoxes[self.crit_links[ind - 1]].get(1.0, END).strip())
for p in self.progressBars:
p.step(step)
i = self.pages.index(self.current_qa_page) + dir
        if not 0 <= i < len(self.pages):
return
nex = self.current_qa_page
while checked:
nex = self.pages[i]
finish = True
if nex in self.checkboxes.keys():
for t in self.checkboxes[nex]:
if t.get() is False:
finish = False
break
if i == len(self.pages)-1 or i == 0:
break
if not finish:
break
i += dir
self.switch_frame(self.pages[i])
def qa_done(self, qaState):
self.qaData.update_All(qaState, self.lastpage.reporterStr.get(), self.lastpage.commentsBox.get(1.0, END), None)
self.parent.scModel.save()
self.destroy()
cleanup_temporary_files(probes=self.probes, scModel=self.scModel)
def getPredNode(self, node):
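        # Return the modification along the first non-Donor edge entering `node`,
        # or None when no such predecessor exists.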
for pred in self.scModel.G.predecessors(node):
edge = self.scModel.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.scModel.getModificationForEdge(pred, node)
return None
def check_ok(self, event=None):
        if self.lastpage is not None:
if len(self.errors) == 0 and all(bool(page.checkboxes) for page in self.pages):
self.lastpage.acceptButton.config(state=NORMAL)
else:
self.lastpage.acceptButton.config(state=DISABLED)
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
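    # graph_lasso estimates a sparse precision matrix K by maximizing the
    # penalized log-likelihood  log det(K) - tr(S K) - alpha * ||K||_1 (off-diagonal),
    # so larger alpha values drive more off-diagonal entries of K to zero.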
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
    # The iris datasets in R and sklearn do not match in a few places;
    # these values are for the sklearn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
joernhees/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
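# Illustrative usage sketch (variable names here are hypothetical; mirrors the
# class docstring example above):
#
#     rbm = BernoulliRBM(n_components=64, learning_rate=0.05, n_iter=20)
#     rbm.fit(X_binary)                  # X_binary: (n_samples, n_features) array of 0/1 values
#     hidden = rbm.transform(X_binary)   # P(h=1|v) activations, shape (n_samples, 64)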
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/numpy/core/tests/test_multiarray.py | 3 | 259121 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
from contextlib import contextmanager
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
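# Example (illustrative): _aligned_zeros((3, 4), np.float64, align=64) returns a
# zero-filled (3, 4) array whose data pointer lies on a 64-byte boundary.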
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # sizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1)
tinya = np.nextafter(np.longdouble(0), -1)
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
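        # Exhaustively build every `power`-bit pattern as a length-`length` boolean
        # array and check np.count_nonzero against a pure-Python sum, including
        # after scaling the underlying uint8 view to non-0/1 values.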
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
arr = np.empty(1000, 'V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
arr = np.empty(1000, dt)
arr[::-1].sort()
def test_sort_degraded(self):
# a degraded (adversarial) dataset that would take minutes to run with a plain quicksort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
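# For reference: the swap above places the sorted second-to-last element in
# the middle position, so a median-of-3 pivot repeatedly picks a near-worst
# pivot and a plain quicksort degrades to O(n^2).  np.sort is expected to
# stay fast here, presumably by bounding the recursion depth and falling
# back to heapsort (introsort-style) on such inputs.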
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
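# For reference: with all-equal keys every permutation is "sorted", so a
# stable sort must return the identity permutation, e.g.:
#     np.zeros(5).argsort(kind='m')    # -> [0, 1, 2, 3, 4]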
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
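# For reference: side='left' returns the leftmost valid insertion point and
# side='right' the rightmost, so searching a sorted array of distinct values
# for its own elements should give arange(n) and arange(1, n + 1):
#     np.array([0., 1., 2.]).searchsorted([0., 1., 2.], side='left')    # -> [0, 1, 2]
#     np.array([0., 1., 2.]).searchsorted([0., 1., 2.], side='right')   # -> [1, 2, 3]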
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
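# For reference: "smart resetting" presumably refers to the binary search
# reusing the previous result as a lower bound while the keys are
# non-decreasing; the descending keys [6, 5, 4] above force that cached
# index to be reset, which is the path being exercised here.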
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error.
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test non-integer values in kth raise an error.
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
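# For reference: x is a permutation of 0..n-1, so the element of rank `mid`
# is the value `mid` itself, which is why partition(x, mid)[mid] == mid is
# the expected result; the rolled layout is what makes a naive median-of-3
# pivot selection go quadratic.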
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
# map() is lazy on Python 3, so shuffle each row explicitly
for row in d1: np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
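# For reference: this helper checks the partition invariant around each kth
# index: entries between the previous kth and this kth are strictly smaller
# than d[kth], and entries from kth onwards are >= d[kth].  The strict "<"
# is fine for the callers below because the partitioned axis always holds
# distinct values there.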
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
# map() is lazy on Python 3, so shuffle each row explicitly
for row in d1: np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, 1)
self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
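# For reference: np.dot can dispatch x @ x.T style products to the BLAS
# syrk routine instead of gemm; these loops check that both code paths
# agree with the plain result for every float/complex dtype.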
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
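# For reference, a minimal sketch of the deferral rule described above
# (the class name is illustrative only): setting __array_ufunc__ = None
# makes ndarray binops return NotImplemented, so Python then calls the
# other operand's reflected method:
#     class Deferring(object):
#         __array_ufunc__ = None
#         def __radd__(self, other):
#             return "reverse"
#     np.arange(3) + Deferring()    # -> "reverse"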
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 5).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 5, dtype=dtype)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_imethod(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
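# For reference: __array_ufunc__ receives the positional inputs in *inputs
# and any output arrays normalized into a tuple under kw['out'], which is
# why CheckIndex reports plain integers for inputs and 1-tuples for outputs.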
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
class TestTemporaryElide(TestCase):
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
#     d = a.copy()  # refcount 1
#     return d, d + d  # PyNumber_Add without increasing refcount
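# For reference: "temporary elision" means that when an operand's reference
# count shows it to be a throw-away temporary, numpy may reuse its buffer
# for the result (turning d + d into an in-place add).  The C helper calls
# PyNumber_Add without taking an extra reference, so the test checks that
# the array it returns was not clobbered by such a reuse.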
from numpy.core.multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
# the returned original must not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the python stack (to check
# that we are called directly from python) is flawed, as the object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(l):
#     return l[4] + l[4]  # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1s makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
# the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
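# For reference: buffer reuse is only valid when the output dtype matches the
# temporary's dtype, so (d + d) / 2 on an int64 temporary must allocate a new
# float64 result rather than eliding into the int64 buffer.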
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test that there is no elision on broadcast to a higher dimension
# this only exercises the elision code path in debug mode; triggering it in
# normal mode would need a matching dimension of at least 256 KiB, i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=np.bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous case used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
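        # memset leaves the object pointers NULL; argmax must handle them without crashing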
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous case used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
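            # exercise both the native and the byte-swapped byte order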
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
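        # lexsort sorts by the last key first, so `a` is the primary key here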
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(TestCase):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str gives less precision than repr
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
self.assertRaises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
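                # 2 bytes read plus one 8-byte float64 => expected position 10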
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
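        # parse the bytes both with fromstring and, via a temporary file,
        # with fromfile; both results must match the expected value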
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, b'', np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
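        # a is a read-only reshaped view of a0, b a non-contiguous slice of a;
        # a0 and b0 are the writeable counterparts used to check how the
        # writeable/updateifcopy flags propagate through .flat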
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test it does not break the alloc cache (gh-9216)
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(ValueError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
            # Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = u'b'
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
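        # real, complex and object (Decimal) matrices used by the stats tests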
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
        # undoing the ddof normalization should recover the same sum of
        # squared deviations for every ddof
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
        # of float32.
assert _mean(np.ones(100000, dtype='float16')) == 1
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
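        # repeated calls with an explicit out array must not leak references to r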
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
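            # allocate an array whose data pointer has the requested byte alignment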
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
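# (Informal note: these values select the padding mode of the C-level
# neighborhood iterator exercised below -- 'zero' and 'one' pad with those
# constants, 'constant' uses a caller-supplied fill value, 'circular' wraps
# around the array, and 'mirror' reflects it at the edges.)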
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
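# (Informal note: _check parses a PEP 3118 format string with
# _dtype_from_pep3118 and compares the result with the expected dtype spec;
# e.g. test_unnamed_fields below verifies that 'ii' maps to
# [('f0', 'i'), ('f1', 'i')].)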
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
# ticket #2046: should not segfault, should raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible(object):
def __bool__(self):
raise NotImplementedError
__nonzero__ = __bool__ # python 2
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
self_containing = np.array([None])
self_containing[0] = self_containing
try:
Error = RecursionError
except NameError:
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
def test_empty_result(self):
# pass empty where result through an assignment which reads the data of
# empty arrays, error detectable with valgrind, see gh-8922
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.))
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for i in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
# See #7949. Don't use the "/" operator with the -3 switch, since Python
# reports it as a DeprecationWarning
if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestCTypes(TestCase):
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertEqual(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
from numpy.core import _internal
_internal.ctypes = None
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertIsInstance(
test_arr.ctypes._ctypes, _internal._missing_ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
class MyAlwaysEqual(object):
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
class MyAlwaysEqualOld(MyAlwaysEqual):
__array_priority__ = 10000
class MyAlwaysEqualNew(MyAlwaysEqual):
__array_ufunc__ = None
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
my_always_equal = my_always_equal_cls()
assert_equal(my_always_equal == array, 'eq')
assert_equal(array == my_always_equal, 'eq')
assert_equal(my_always_equal != array, 'ne')
assert_equal(array != my_always_equal, 'ne')
if __name__ == "__main__":
run_module_suite()
| bsd-2-clause |
TomAugspurger/pandas | pandas/tests/frame/methods/test_rename.py | 1 | 12492 | from collections import ChainMap
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
class TestRename:
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
@pytest.mark.parametrize(
"args,kwargs",
[
((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")),
((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))),
],
)
def test_rename_chainmap(self, args, kwargs):
# see gh-23859
colAData = range(1, 11)
colBdata = np.random.randn(10)
df = DataFrame({"A": colAData, "B": colBdata})
result = df.rename(*args, **kwargs)
expected = DataFrame({"a": colAData, "b": colBdata})
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(index=str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, index=str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, index=str.lower, columns=str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_rename_positional_raises(self):
# GH 29136
df = DataFrame(columns=["A", "B"])
msg = r"rename\(\) takes from 1 to 2 positional arguments"
with pytest.raises(TypeError, match=msg):
df.rename(None, str.lower)
def test_rename_no_mappings_raises(self):
# GH 29136
df = DataFrame([[1]])
msg = "must pass an index to rename"
with pytest.raises(TypeError, match=msg):
df.rename()
with pytest.raises(TypeError, match=msg):
df.rename(None, index=None)
with pytest.raises(TypeError, match=msg):
df.rename(None, columns=None)
with pytest.raises(TypeError, match=msg):
df.rename(None, columns=None, index=None)
def test_rename_mapper_and_positional_arguments_raises(self):
# GH 29136
df = DataFrame([[1]])
msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=msg):
df.rename({}, index={})
with pytest.raises(TypeError, match=msg):
df.rename({}, columns={})
with pytest.raises(TypeError, match=msg):
df.rename({}, columns={}, index={})
| bsd-3-clause |
paultcochrane/bokeh | bokeh/charts/builder/timeseries_builder.py | 26 | 6252 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class, which lets you build your TimeSeries charts by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
        values (iterable): a 2d iterable containing the values. Can be anything
            that can be converted to a 2d array; the x (time) axis is determined
            by ``index``, while the remaining series are interpreted as y values.
index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
            ``values`` is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
        series) if ``values`` is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
        # necessary to make all formats and encoders happy with array, blaze, ...
        xs = list(self._values_index)
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
            # save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
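# Illustrative sketch (not part of the original module): how the flat
# attribute list built by ``_process_data`` ("x_<col>", "y_<col>", ...) is
# paired back into (x, y) couples by ``chunk`` in ``_yield_renderers``, one
# pair per rendered line. Commented pseudo-usage only:
#
# attrs = ['x_python', 'y_python', 'x_pypy', 'y_pypy']
# list(chunk(attrs, 2))  # -> (x, y) name pairs, one Line glyph per pair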
| bsd-3-clause |
menpo/menpo | menpo/image/base.py | 2 | 131364 | from typing import Iterable, Optional
from warnings import warn
import PIL.Image as PILImage
import numpy as np
from menpo.base import MenpoDeprecationWarning, Vectorizable, copy_landmarks_and_path
from menpo.landmark import Landmarkable
from menpo.shape import PointCloud, bounding_box
from menpo.transform import (
AlignmentUniformScale,
Homogeneous,
NonUniformScale,
Rotation,
Translation,
scale_about_centre,
transform_about_centre,
)
from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable
from .interpolation import scipy_interpolation
try:
from .interpolation import cv2_perspective_interpolation
except ImportError:
warn("Falling back to scipy interpolation for affine warps")
cv2_perspective_interpolation = None # type: ignore
from .patches import (
extract_patches_with_slice,
set_patches,
extract_patches_by_sampling,
)
# Cache the greyscale luminosity coefficients as they are invariant.
_greyscale_luminosity_coef: Optional[np.ndarray] = None
class ImageBoundaryError(ValueError):
r"""
Exception that is thrown when an attempt is made to crop an image beyond
    the edge of its boundary.
Parameters
----------
requested_min : ``(d,)`` `ndarray`
The per-dimension minimum index requested for the crop
requested_max : ``(d,)`` `ndarray`
The per-dimension maximum index requested for the crop
snapped_min : ``(d,)`` `ndarray`
The per-dimension minimum index that could be used if the crop was
constrained to the image boundaries.
    snapped_max : ``(d,)`` `ndarray`
The per-dimension maximum index that could be used if the crop was
constrained to the image boundaries.
"""
def __init__(self, requested_min, requested_max, snapped_min, snapped_max):
super(ImageBoundaryError, self).__init__()
self.requested_min = requested_min
self.requested_max = requested_max
self.snapped_min = snapped_min
self.snapped_max = snapped_max
def indices_for_image_of_shape(shape):
r"""
The indices of all pixels in an image with a given shape (without
channel information).
Parameters
----------
    shape : `tuple`
The shape of the image.
Returns
-------
    indices : ``(n_pixels, n_dims)`` `ndarray`
The indices of all the pixels in the image.
"""
return np.indices(shape).reshape([len(shape), -1]).T
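# Illustrative sketch (not part of the original module): for a (2, 3) shape,
# ``indices_for_image_of_shape`` yields one (row, col) pair per pixel:
#
#   indices_for_image_of_shape((2, 3))
#   # -> [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]  (shape (6, 2))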
def normalize_pixels_range(pixels, error_on_unknown_type=True):
r"""
Normalize the given pixels to the Menpo valid floating point range, [0, 1].
This is a single place to handle normalising pixels ranges. At the moment
the supported types are uint8 and uint16.
Parameters
----------
pixels : `ndarray`
The pixels to normalize in the floating point range.
error_on_unknown_type : `bool`, optional
If ``True``, this method throws a ``ValueError`` if the given pixels
array is an unknown type. If ``False``, this method performs no
operation.
Returns
-------
normalized_pixels : `ndarray`
The normalized pixels in the range [0, 1].
Raises
------
ValueError
If ``pixels`` is an unknown type and ``error_on_unknown_type==True``
"""
dtype = pixels.dtype
if dtype == np.uint8:
max_range = 255.0
elif dtype == np.uint16:
max_range = 65535.0
else:
if error_on_unknown_type:
raise ValueError(
"Unexpected dtype ({}) - normalisation range "
"is unknown".format(dtype)
)
else:
# Do nothing
return pixels
# This multiplication is quite a bit faster than just dividing - will
# automatically cast it up to float64
return pixels * (1.0 / max_range)
def denormalize_pixels_range(pixels, out_dtype):
"""
Denormalize the given pixels array into the range of the given out dtype.
If the given pixels are floating point or boolean then the values
are scaled appropriately and cast to the output dtype. If the pixels
are already the correct dtype they are immediately returned.
Floating point pixels must be in the range [0, 1].
Currently uint8 and uint16 output dtypes are supported.
Parameters
----------
pixels : `ndarray`
The pixels to denormalize.
out_dtype : `np.dtype`
The numpy data type to output and scale the values into.
Returns
-------
out_pixels : `ndarray`
Will be in the correct range and will have type ``out_dtype``.
Raises
------
ValueError
Pixels are floating point and range outside [0, 1]
ValueError
Input pixels dtype not in the set {float32, float64, bool}.
ValueError
Output dtype not in the set {uint8, uint16}
"""
in_dtype = pixels.dtype
if in_dtype == out_dtype:
return pixels
if np.issubclass_(in_dtype.type, np.floating) or in_dtype == float:
if np.issubclass_(out_dtype, np.floating) or out_dtype == float:
return pixels.astype(out_dtype)
else:
p_min = pixels.min()
p_max = pixels.max()
if p_min < 0.0 or p_max > 1.0:
raise ValueError(
"Unexpected input range [{}, {}] - pixels "
"must be in the range [0, 1]".format(p_min, p_max)
)
elif in_dtype != bool:
raise ValueError(
"Unexpected input dtype ({}) - only float32, float64 "
"and bool supported".format(in_dtype)
)
if out_dtype == np.uint8:
max_range = 255.0
elif out_dtype == np.uint16:
max_range = 65535.0
else:
raise ValueError(
"Unexpected output dtype ({}) - normalisation range "
"is unknown".format(out_dtype)
)
return (pixels * max_range).astype(out_dtype)
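# Illustrative sketch (not part of the original module): round-tripping a
# uint8 image through the two helpers above. Guarded so that it only runs
# when this module is executed directly.
if __name__ == "__main__":
    _raw = np.array([[0, 64, 255]], dtype=np.uint8)
    _norm = normalize_pixels_range(_raw)               # float64 in [0, 1]
    _back = denormalize_pixels_range(_norm, np.uint8)  # back to uint8
    assert _back.dtype == np.uint8
    print(_norm, _back)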
def channels_to_back(pixels):
r"""
Roll the channels from the front to the back for an image. If the image
that is passed is already a numpy array, then that is also fine.
Always returns a numpy array because our :map:`Image` containers do not
support channels at the back.
Parameters
----------
image : `ndarray`
The pixels or image to roll the channel back for.
Returns
-------
rolled_pixels : `ndarray`
The numpy array of pixels with the channels on the last axis.
"""
return np.require(
np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype, requirements=["C"]
)
def channels_to_front(pixels):
r"""
Convert the given pixels array (channels assumed to be at the last axis
as is common in other imaging packages) into a numpy array.
Parameters
----------
pixels : ``(H, W, C)`` `buffer`
The pixels to convert to the Menpo channels at axis 0.
Returns
-------
pixels : ``(C, H, W)`` `ndarray`
Numpy array, channels as axis 0.
"""
if not isinstance(pixels, np.ndarray):
pixels = np.array(pixels)
return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype, requirements=["C"])
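# Illustrative sketch (not part of the original module): ``channels_to_back``
# and ``channels_to_front`` are inverses on a channels-first array.
if __name__ == "__main__":
    _chw = np.zeros((3, 4, 5))                  # (C, H, W), channels first
    _hwc = channels_to_back(_chw)               # (H, W, C), channels last
    assert _hwc.shape == (4, 5, 3)
    assert channels_to_front(_hwc).shape == (3, 4, 5)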
class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable):
r"""
An n-dimensional image.
Images are n-dimensional homogeneous regular arrays of data. Each
spatially distinct location in the array is referred to as a `pixel`.
At a pixel, ``k`` distinct pieces of information can be stored. Each
    datum at a pixel is referred to as being in a `channel`. All pixels in
the image have the same number of channels, and all channels have the
same data-type (`float64`).
Parameters
----------
image_data : ``(C, M, N ..., Q)`` `ndarray`
Array representing the image pixels, with the first axis being
channels.
copy : `bool`, optional
If ``False``, the ``image_data`` will not be copied on assignment.
Note that this will miss out on additional checks. Further note that we
still demand that the array is C-contiguous - if it isn't, a copy will
be generated anyway.
In general, this should only be used if you know what you are doing.
Raises
------
Warning
If ``copy=False`` cannot be honoured
ValueError
If the pixel array is malformed
"""
def __init__(self, image_data, copy=True):
super(Image, self).__init__()
if not copy:
if not image_data.flags.c_contiguous:
image_data = np.array(image_data, copy=True, order="C")
warn(
"The copy flag was NOT honoured. A copy HAS been made. "
"Please ensure the data you pass is C-contiguous."
)
else:
image_data = np.array(image_data, copy=True, order="C")
# Degenerate case whereby we can just put the extra axis
# on ourselves
if image_data.ndim == 2:
# Ensures that the data STAYS C-contiguous
image_data = image_data.reshape((1,) + image_data.shape)
if image_data.ndim < 2:
raise ValueError(
"Pixel array has to be 2D (implicitly 1 channel, "
"2D shape) or 3D+ (n_channels, 2D+ shape) "
" - a {}D array "
"was provided".format(image_data.ndim)
)
self.pixels = image_data
@classmethod
def init_blank(cls, shape, n_channels=1, fill=0, dtype=float):
r"""
Returns a blank image.
Parameters
----------
shape : `tuple` or `list`
The shape of the image. Any floating point values are rounded up
to the nearest integer.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
Returns
-------
blank_image : :map:`Image`
A new image of the requested size.
"""
# Ensure that the '+' operator means concatenate tuples
shape = tuple(np.ceil(shape).astype(int))
if fill == 0:
pixels = np.zeros((n_channels,) + shape, dtype=dtype)
else:
pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill
# We know there is no need to copy...
return cls(pixels, copy=False)
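    # Illustrative sketch (not part of the original class): typical use of
    # ``init_blank``; a 3-channel 64x64 image filled with 0.5:
    #
    #   img = Image.init_blank((64, 64), n_channels=3, fill=0.5)
    #   img.pixels.shape   # -> (3, 64, 64)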
@classmethod
def init_from_rolled_channels(cls, pixels):
r"""
Deprecated - please use the equivalent ``init_from_channels_at_back`` method.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .init_from_channels_at_back instead.",
MenpoDeprecationWarning,
)
return cls.init_from_channels_at_back(pixels)
@classmethod
def init_from_channels_at_back(cls, pixels):
r"""
Create an Image from a set of pixels where the channels axis is on
the last axis (the back). This is common in other frameworks, and
therefore this method provides a convenient means of creating a menpo
Image from such data. Note that a copy is always created due to the
need to rearrange the data.
Parameters
----------
pixels : ``(M, N ..., Q, C)`` `ndarray`
Array representing the image pixels, with the last axis being
channels.
Returns
-------
image : :map:`Image`
A new image from the given pixels, with the FIRST axis as the
channels.
Raises
------
ValueError
If image is not at least 2D, i.e. has at least 2 dimensions plus
the channels in the end.
"""
if pixels.ndim == 2:
pixels = pixels[..., None]
if pixels.ndim < 2:
raise ValueError(
"Pixel array has to be 2D "
"(2D shape, implicitly 1 channel) "
"or 3D+ (2D+ shape, n_channels) "
" - a {}D array "
"was provided".format(pixels.ndim)
)
return cls(channels_to_front(pixels))
@classmethod
def init_from_pointcloud(
cls,
pointcloud,
group=None,
boundary=0,
n_channels=1,
fill=0,
dtype=float,
return_transform=False,
):
r"""
Create an Image that is big enough to contain the given pointcloud.
The pointcloud will be translated to the origin and then translated
according to its bounds in order to fit inside the new image.
An optional boundary can be provided in order to increase the space
around the boundary of the pointcloud. The boundary will be added
to *all sides of the image* and so a boundary of 5 provides 10 pixels
of boundary total for each dimension.
Parameters
----------
pointcloud : :map:`PointCloud`
Pointcloud to place inside the newly created image.
group : `str`, optional
If ``None``, the pointcloud will only be used to create the image.
If a `str` then the pointcloud will be attached as a landmark
group to the image, with the given string as key.
boundary : `float`
A optional padding distance that is added to the pointcloud bounds.
Default is ``0``, meaning the max/min of tightest possible
containing image is returned.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
adjust the PointCloud in order to build the image, is returned.
Returns
-------
image : ``type(cls)`` Image or subclass
A new image with the same size as the given pointcloud, optionally
with the pointcloud attached as landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
# Translate pointcloud to the origin
minimum = pointcloud.bounds(boundary=boundary)[0]
tr = Translation(-minimum)
origin_pc = tr.apply(pointcloud)
image_shape = origin_pc.range(boundary=boundary)
new_image = cls.init_blank(
image_shape, n_channels=n_channels, fill=fill, dtype=dtype
)
if group is not None:
new_image.landmarks[group] = origin_pc
if return_transform:
return new_image, tr
else:
return new_image
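    # Illustrative sketch (not part of the original class): building an image
    # just large enough to contain a pointcloud, with a 2-pixel border:
    #
    #   pc = PointCloud(np.array([[0., 0.], [10., 10.]]))
    #   img, tr = Image.init_from_pointcloud(pc, boundary=2,
    #                                        return_transform=True)
    #   # tr translates the original pointcloud into the new image frame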
def as_masked(self, mask=None, copy=True):
r"""
Return a copy of this image with an attached mask behavior.
A custom mask may be provided, or ``None``. See the :map:`MaskedImage`
constructor for details of how the kwargs will be handled.
Parameters
----------
mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage`
A mask to attach to the newly generated masked image.
copy : `bool`, optional
If ``False``, the produced :map:`MaskedImage` will share pixels with
``self``. Only suggested to be used for performance.
Returns
-------
masked_image : :map:`MaskedImage`
An image with the same pixels and landmarks as this one, but with
a mask.
"""
from menpo.image import MaskedImage
return copy_landmarks_and_path(
self, MaskedImage(self.pixels, mask=mask, copy=copy)
)
@property
def n_dims(self):
r"""
The number of dimensions in the image. The minimum possible ``n_dims``
is 2.
:type: `int`
"""
return len(self.shape)
@property
def n_pixels(self):
r"""
Total number of pixels in the image ``(prod(shape),)``
:type: `int`
"""
return self.pixels[0, ...].size
@property
def n_elements(self):
r"""
Total number of data points in the image
``(prod(shape), n_channels)``
:type: `int`
"""
return self.pixels.size
@property
def n_channels(self):
"""
The number of channels on each pixel in the image.
:type: `int`
"""
return self.pixels.shape[0]
@property
def width(self):
r"""
The width of the image.
This is the width according to image semantics, and is thus the size
of the **last** dimension.
:type: `int`
"""
return self.pixels.shape[-1]
@property
def height(self):
r"""
The height of the image.
This is the height according to image semantics, and is thus the size
of the **second to last** dimension.
:type: `int`
"""
return self.pixels.shape[-2]
@property
def shape(self):
r"""
The shape of the image
(with ``n_channel`` values at each point).
:type: `tuple`
"""
return self.pixels.shape[1:]
def bounds(self):
r"""
The bounds of the image, minimum is always (0, 0). The maximum is
the maximum **index** that can be used to index into the image for each
dimension. Therefore, bounds will be of the form:
((0, 0), (self.height - 1, self.width - 1)) for a 2D image.
Note that this is akin to supporting a nearest neighbour interpolation.
Although the *actual* maximum subpixel value would be something
like ``self.height - eps`` where ``eps`` is some value arbitrarily
close to 0, this value at least allows sampling without worrying about
floating point error.
:type: `tuple`
"""
return (0,) * self.n_dims, tuple(s - 1 for s in self.shape)
def diagonal(self):
r"""
The diagonal size of this image
:type: `float`
"""
return np.sqrt(np.sum(np.array(self.shape) ** 2))
def centre(self):
r"""
The geometric centre of the Image - the subpixel that is in the
middle.
Useful for aligning shapes and images.
:type: (``n_dims``,) `ndarray`
"""
return np.array(self.shape, dtype=np.double) / 2
def _str_shape(self):
if self.n_dims > 2:
return " x ".join(str(dim) for dim in self.shape)
elif self.n_dims == 2:
return "{}W x {}H".format(self.width, self.height)
def indices(self):
r"""
Return the indices of all pixels in this image.
        :type: (``n_pixels``, ``n_dims``) ndarray
"""
return indices_for_image_of_shape(self.shape)
def _as_vector(self, keep_channels=False):
r"""
The vectorized form of this image.
Parameters
----------
keep_channels : `bool`, optional
========== =============================
Value Return shape
========== =============================
`False` ``(n_channels * n_pixels,)``
`True` ``(n_channels, n_pixels)``
========== =============================
Returns
-------
vec : (See ``keep_channels`` above) `ndarray`
Flattened representation of this image, containing all pixel
and channel information.
"""
if keep_channels:
return self.pixels.reshape([self.n_channels, -1])
else:
return self.pixels.ravel()
def from_vector(self, vector, n_channels=None, copy=True):
r"""
Takes a flattened vector and returns a new image formed by reshaping
the vector to the correct pixels and channels.
The `n_channels` argument is useful for when we want to add an extra
channel to an image but maintain the shape. For example, when
calculating the gradient.
Note that landmarks are transferred in the process.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
A flattened vector of all pixels and channels of an image.
n_channels : `int`, optional
If given, will assume that vector is the same shape as this image,
but with a possibly different number of channels.
copy : `bool`, optional
If ``False``, the vector will not be copied in creating the new
image.
Returns
-------
image : :map:`Image`
New image of same shape as this image and the number of
specified channels.
Raises
------
Warning
If the ``copy=False`` flag cannot be honored
"""
# This is useful for when we want to add an extra channel to an image
# but maintain the shape. For example, when calculating the gradient
n_channels = self.n_channels if n_channels is None else n_channels
image_data = vector.reshape((n_channels,) + self.shape)
new_image = Image(image_data, copy=copy)
new_image.landmarks = self.landmarks
return new_image
def _from_vector_inplace(self, vector, copy=True):
r"""
Takes a flattened vector and update this image by
reshaping the vector to the correct dimensions.
Parameters
----------
vector : ``(n_pixels,)`` `bool ndarray`
            A flattened vector of all the pixels of a :map:`BooleanImage`.
copy: `bool`, optional
If ``False``, the vector will be set as the pixels. If ``True``, a
copy of the vector is taken.
Raises
------
Warning
If ``copy=False`` flag cannot be honored
Note
----
For :map:`BooleanImage` this is rebuilding a boolean image **itself**
from boolean values. The mask is in no way interpreted in performing
the operation, in contrast to :map:`MaskedImage`, where only the masked
region is used in :meth:`from_vector_inplace` and :meth:`as_vector`.
"""
image_data = vector.reshape(self.pixels.shape)
if not copy:
if not image_data.flags.c_contiguous:
warn(
"The copy flag was NOT honoured. A copy HAS been made. "
"Please ensure the data you pass is C-contiguous."
)
image_data = np.array(
image_data, copy=True, order="C", dtype=image_data.dtype
)
else:
image_data = np.array(
image_data, copy=True, order="C", dtype=image_data.dtype
)
self.pixels = image_data
def extract_channels(self, channels):
r"""
A copy of this image with only the specified channels.
Parameters
----------
channels : `int` or `[int]`
The channel index or `list` of channel indices to retain.
Returns
-------
image : `type(self)`
A copy of this image with only the channels requested.
"""
copy = self.copy()
if not isinstance(channels, list):
channels = [channels] # ensure we don't remove the channel axis
copy.pixels = self.pixels[channels]
return copy
def as_histogram(self, keep_channels=True, bins="unique"):
r"""
Histogram binning of the values of this image.
Parameters
----------
keep_channels : `bool`, optional
If set to ``False``, it returns a single histogram for all the
channels of the image. If set to ``True``, it returns a `list` of
histograms, one for each channel.
bins : ``{unique}``, positive `int` or sequence of scalars, optional
If set equal to ``'unique'``, the bins of the histograms are centred
on the unique values of each channel. If set equal to a positive
`int`, then this is the number of bins. If set equal to a
sequence of scalars, these will be used as bins centres.
Returns
-------
hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside
The histogram(s). If ``keep_channels=False``, then hist is an
`ndarray`. If ``keep_channels=True``, then hist is a `list` with
``len(hist)=n_channels``.
bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside
An array or a list of arrays corresponding to the above histograms
that store the bins' edges.
Raises
------
ValueError
Bins can be either 'unique', positive int or a sequence of scalars.
Examples
--------
Visualizing the histogram when a list of array bin edges is provided:
>>> hist, bin_edges = image.as_histogram()
        >>> for k in range(len(hist)):
        ...     plt.subplot(1, len(hist), k + 1)
        ...     width = 0.7 * (bin_edges[k][1] - bin_edges[k][0])
        ...     centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2
        ...     plt.bar(centre, hist[k], align='center', width=width)
"""
# parse options
if isinstance(bins, str):
if bins == "unique":
bins = 0
else:
raise ValueError(
"Bins can be either 'unique', positive int or"
"a sequence of scalars."
)
elif isinstance(bins, int) and bins < 1:
raise ValueError(
"Bins can be either 'unique', positive int or a " "sequence of scalars."
)
# compute histogram
vec = self.as_vector(keep_channels=keep_channels)
if len(vec.shape) == 1 or vec.shape[0] == 1:
if bins == 0:
bins = np.unique(vec)
hist, bin_edges = np.histogram(vec, bins=bins)
else:
hist = []
bin_edges = []
num_bins = bins
for ch in range(vec.shape[0]):
if bins == 0:
num_bins = np.unique(vec[ch, :])
h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins)
hist.append(h_tmp)
bin_edges.append(c_tmp)
return hist, bin_edges
def _view_2d(
self,
figure_id=None,
new_figure=False,
channels=None,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
r"""
View the image using the default image viewer. This method will appear
on the Image as ``view`` if the Image is 2D.
        Parameters
        ----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return ImageViewer(
figure_id, new_figure, self.n_dims, self.pixels, channels=channels
).render(
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
def _view_landmarks_2d(
self,
channels=None,
group=None,
with_labels=None,
without_labels=None,
figure_id=None,
new_figure=False,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_lines=True,
line_colour=None,
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour=None,
marker_edge_colour=None,
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_legend=False,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
"""
Visualize the landmarks. This method will appear on the Image as
``view_landmarks`` if the Image is 2D.
Parameters
----------
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
group : `str` or``None`` optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the original
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
from menpo.visualize import view_image_landmarks
return view_image_landmarks(
self,
channels,
False,
group,
with_labels,
without_labels,
figure_id,
new_figure,
interpolation,
cmap_name,
alpha,
render_lines,
line_colour,
line_style,
line_width,
render_markers,
marker_style,
marker_size,
marker_face_colour,
marker_edge_colour,
marker_edge_width,
render_numbering,
numbers_horizontal_align,
numbers_vertical_align,
numbers_font_name,
numbers_font_size,
numbers_font_style,
numbers_font_weight,
numbers_font_colour,
render_legend,
legend_title,
legend_font_name,
legend_font_style,
legend_font_size,
legend_font_weight,
legend_marker_scale,
legend_location,
legend_bbox_to_anchor,
legend_border_axes_pad,
legend_n_columns,
legend_horizontal_spacing,
legend_vertical_spacing,
legend_border,
legend_border_padding,
legend_shadow,
legend_rounded_corners,
render_axes,
axes_font_name,
axes_font_size,
axes_font_style,
axes_font_weight,
axes_x_limits,
axes_y_limits,
axes_x_ticks,
axes_y_ticks,
figure_size,
)
def crop(
self,
min_indices,
max_indices,
constrain_to_boundary=False,
return_transform=False,
):
r"""
Return a cropped copy of this image using the given minimum and
maximum indices. Landmarks are correctly adjusted so they maintain
their position relative to the newly cropped image.
Parameters
----------
min_indices : ``(n_dims,)`` `ndarray`
The minimum index over each dimension.
max_indices : ``(n_dims,)`` `ndarray`
The maximum index over each dimension.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
cropped_image : `type(self)`
A new instance of self, but cropped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
``min_indices`` and ``max_indices`` both have to be of length
``n_dims``. All ``max_indices`` must be greater than
``min_indices``.
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices = np.floor(min_indices)
max_indices = np.ceil(max_indices)
if not (min_indices.size == max_indices.size == self.n_dims):
raise ValueError(
"Both min and max indices should be 1D numpy arrays of"
" length n_dims ({})".format(self.n_dims)
)
elif not np.all(max_indices > min_indices):
raise ValueError("All max indices must be greater that the min " "indices")
min_bounded = self.constrain_points_to_bounds(min_indices)
max_bounded = self.constrain_points_to_bounds(max_indices)
all_max_bounded = np.all(min_bounded == min_indices)
all_min_bounded = np.all(max_bounded == max_indices)
if not (constrain_to_boundary or all_max_bounded or all_min_bounded):
# points have been constrained and the user didn't want this -
raise ImageBoundaryError(min_indices, max_indices, min_bounded, max_bounded)
new_shape = (max_bounded - min_bounded).astype(int)
return self.warp_to_shape(
new_shape,
Translation(min_bounded),
order=0,
warp_landmarks=True,
return_transform=return_transform,
)
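    # Illustrative sketch (not part of the original class): cropping a
    # 100x100 image to the box with min index (10, 10) and max index (50, 60):
    #
    #   img = Image.init_blank((100, 100))
    #   cropped = img.crop(np.array([10, 10]), np.array([50, 60]))
    #   cropped.shape   # -> (40, 50)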
def crop_to_pointcloud(
self, pointcloud, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
            boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the bounds of the pointcloud.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices, max_indices = pointcloud.bounds(boundary=boundary)
return self.crop(
min_indices,
max_indices,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks(
self, group=None, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with an optional ``n_pixel`` boundary
Parameters
----------
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this images
            boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to its landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud(
pc,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_pointcloud_proportion(
self,
pointcloud,
boundary_proportion,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with a border proportional to the pointcloud spread or range.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the pointclouds' per-dimension range; if ``False`` w.r.t.
the maximum value of the pointclouds' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the border proportional to
the pointcloud spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
if minimum:
boundary = boundary_proportion * np.min(pointcloud.range())
else:
boundary = boundary_proportion * np.max(pointcloud.range())
return self.crop_to_pointcloud(
pointcloud,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks_proportion(
self,
boundary_proportion,
group=None,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Crop this image to be bounded around a set of landmarks with a
border proportional to the landmark spread or range.
Parameters
----------
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the landmarks' per-dimension range; if ``False`` w.r.t. the
maximum value of the landmarks' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this images
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
This image, cropped to its landmarks with a border proportional to
the landmark spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud_proportion(
pc,
boundary_proportion,
minimum=minimum,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def constrain_points_to_bounds(self, points):
r"""
Constrains the points provided to be within the bounds of this image.
Parameters
----------
points : ``(d,)`` `ndarray`
Points to be snapped to the image boundaries.
Returns
-------
bounded_points : ``(d,)`` `ndarray`
Points snapped to not stray outside the image edges.
"""
bounded_points = points.copy()
# check we don't stray under any edges
bounded_points[bounded_points < 0] = 0
# check we don't stray over any edges
shape = np.array(self.shape)
over_image = (shape - bounded_points) < 0
bounded_points[over_image] = shape[over_image]
return bounded_points
def extract_patches(
self,
patch_centers,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
order=0,
mode="constant",
cval=0.0,
):
r"""
Extract a set of patches from an image. Given a set of patch centers
and a patch size, patches are extracted from within the image, centred
on the given coordinates. Sample offsets denote a set of offsets to
extract from within a patch. This is very useful if you want to extract
a dense set of features around a set of landmarks and simply sample the
same grid of patches around the landmarks.
        If sample offsets are used, to access the patches for each offset you
        need to slice the resulting `list`. So with 2 offsets, the patches for
        the first centre would be ``patches[:2]``.
Currently only 2D images are supported.
Note that the default is nearest neighbour sampling for the patches
which is achieved via slicing and is much more efficient than using
sampling/interpolation. Note that a significant performance decrease
will be measured if the ``order`` or ``mode`` parameters are modified
from ``order = 0`` and ``mode = 'constant'`` as internally sampling
will be used rather than slicing.
Parameters
----------
patch_centers : :map:`PointCloud`
The centers to extract patches around.
patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according to
the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside the
image boundaries.
Returns
-------
patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` of :map:`Image` objects if
            ``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
if self.n_dims != 2:
raise ValueError(
"Only two dimensional patch extraction is " "currently supported."
)
if order == 0 and mode == "constant":
# Fast path using slicing
single_array = extract_patches_with_slice(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
cval=cval,
)
else:
single_array = extract_patches_by_sampling(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
order=order,
mode=mode,
cval=cval,
)
if as_single_array:
return single_array
else:
return [Image(o, copy=False) for p in single_array for o in p]
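    # Illustrative sketch (not part of the original class): extracting a
    # single 4x4 patch centred on (8, 8) of a blank 16x16 image:
    #
    #   img = Image.init_blank((16, 16))
    #   patches = img.extract_patches(PointCloud(np.array([[8., 8.]])),
    #                                 patch_shape=(4, 4))
    #   patches.shape   # -> (1, 1, 1, 4, 4): centres, offsets, channels, patch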
def extract_patches_around_landmarks(
self,
group=None,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
):
r"""
Extract patches around landmarks existing on this image. Provided the
group label and optionally the landmark label extract a set of patches.
See `extract_patches` for more information.
Currently only 2D images are supported.
Parameters
----------
group : `str` or ``None``, optional
The landmark group to use as patch centres.
patch_shape : `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` of :map:`Image` objects if
            ``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
return self.extract_patches(
self.landmarks[group],
patch_shape=patch_shape,
sample_offsets=sample_offsets,
as_single_array=as_single_array,
)
def set_patches(self, patches, patch_centers, offset=None, offset_index=None):
r"""
Set the values of a group of patches into the correct regions of a copy
of this image. Given an array of patches and a set of patch centers,
the patches' values are copied in the regions of the image that are
centred on the coordinates of the given centers.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
# parse arguments
if self.n_dims != 2:
raise ValueError(
"Only two dimensional patch insertion is " "currently supported."
)
if offset is None:
offset = np.zeros([1, 2], dtype=np.intp)
elif isinstance(offset, tuple) or isinstance(offset, list):
offset = np.asarray([offset])
offset = np.require(offset, dtype=np.intp)
if not offset.shape == (1, 2):
raise ValueError(
"The offset must be a tuple, a list or a "
"numpy.array with shape (1, 2)."
)
if offset_index is None:
offset_index = 0
# if patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(
patches, patch_centers.n_points
)
copy = self.copy()
# set patches
set_patches(patches, copy.pixels, patch_centers.points, offset, offset_index)
return copy
def set_patches_around_landmarks(
self, patches, group=None, offset=None, offset_index=None
):
r"""
Set the values of a group of patches around the landmarks existing in a
copy of this image. Given an array of patches, a group and a label, the
patches' values are copied in the regions of the image that are
centred on the coordinates of corresponding landmarks.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
group : `str` or ``None`` optional
The landmark group to use as patch centres.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
return self.set_patches(
patches, self.landmarks[group], offset=offset, offset_index=offset_index
)
def warp_to_mask(
self,
template_mask,
transform,
warp_landmarks=True,
order=1,
mode="constant",
cval=0.0,
batch_size=None,
return_transform=False,
):
r"""
Return a copy of this image warped into a different reference space.
Note that warping into a mask is slower than warping into a full image.
If you don't need a non-linear mask, consider :meth:``warp_to_shape``
instead.
Parameters
----------
template_mask : :map:`BooleanImage`
Defines the shape of the result, and what pixels should be sampled.
transform : :map:`Transform`
Transform **from the template space back to this image**.
Defines, for each pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
            value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : :map:`MaskedImage`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
if self.n_dims != transform.n_dims:
raise ValueError(
"Trying to warp a {}D image with a {}D transform "
"(they must match)".format(self.n_dims, transform.n_dims)
)
template_points = template_mask.true_indices()
points_to_sample = transform.apply(template_points, batch_size=batch_size)
sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_image = self._build_warp_to_mask(template_mask, sampled)
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def _build_warp_to_mask(self, template_mask, sampled_pixel_values):
r"""
Builds the warped image from the template mask and sampled pixel values.
Overridden for :map:`BooleanImage` as we can't use the usual
:meth:`from_vector_inplace` method. All other :map:`Image` classes
share the :map:`Image` implementation.
Parameters
----------
template_mask : :map:`BooleanImage` or 2D `bool ndarray`
Mask for warping.
sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray`
Sampled value to rebuild the masked image from.
"""
from menpo.image import MaskedImage
warped_image = MaskedImage.init_blank(
template_mask.shape, n_channels=self.n_channels, mask=template_mask
)
warped_image._from_vector_inplace(sampled_pixel_values.ravel())
return warped_image
def sample(self, points_to_sample, order=1, mode="constant", cval=0.0):
r"""
Sample this image at the given sub-pixel accurate points. The input
PointCloud should have the same number of dimensions as the image e.g.
a 2D PointCloud for a 2D multi-channel image. A numpy array will be
        returned that has the values for every given point across each channel
of the image.
Parameters
----------
points_to_sample : :map:`PointCloud`
Array of points to sample from the image. Should be
`(n_points, n_dims)`
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
Returns
-------
sampled_pixels : (`n_points`, `n_channels`) `ndarray`
The interpolated values taken across every channel of the image.
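        Examples
        --------
        A minimal, illustrative sketch (``im`` is assumed to be an existing
        2D multi-channel image; it is not defined in this module)::
            import numpy as np
            from menpo.shape import PointCloud
            locations = PointCloud(np.array([[10.5, 20.25],
                                             [12.0, 18.75]]))
            values = im.sample(locations)  # interpolated values at both points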
"""
# The public interface is a PointCloud, but when this is used internally
# a numpy array is passed. So let's just treat the PointCloud as a
# 'special case' and not document the ndarray ability.
if isinstance(points_to_sample, PointCloud):
points_to_sample = points_to_sample.points
return scipy_interpolation(
self.pixels, points_to_sample, order=order, mode=mode, cval=cval
)
def warp_to_shape(
self,
template_shape,
transform,
warp_landmarks=True,
order=1,
mode="constant",
cval=0.0,
batch_size=None,
return_transform=False,
):
"""
Return a copy of this image warped into a different reference space.
Parameters
----------
template_shape : `tuple` or `ndarray`
Defines the shape of the result, and what pixel indices should be
sampled (all of them).
transform : :map:`Transform`
Transform **from the template_shape space back to this image**.
Defines, for each index on template_shape, which pixel location
should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
            value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : `type(self)`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
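        Examples
        --------
        A hedged sketch that warps an assumed 2D image ``im`` into a doubled
        shape using a simple scale transform (any :map:`Transform` mapping
        template coordinates back onto this image would work)::
            from menpo.transform import UniformScale
            # template -> image mapping: template coordinates are twice as big
            t = UniformScale(0.5, 2)
            bigger = im.warp_to_shape(tuple(2 * s for s in im.shape), t)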
"""
template_shape = np.array(template_shape, dtype=int)
if (
isinstance(transform, Homogeneous)
and order in range(2)
and self.n_dims == 2
and cv2_perspective_interpolation is not None
):
            # OpenCV has an optimised interpolation for 2D perspective
            # warps - use that where we can
warped_pixels = cv2_perspective_interpolation(
self.pixels,
template_shape,
transform,
order=order,
mode=mode,
cval=cval,
)
else:
template_points = indices_for_image_of_shape(template_shape)
points_to_sample = transform.apply(template_points, batch_size=batch_size)
sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
# set any nan values to 0
# (seems that map_coordinates can produce nan values)
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_pixels = sampled.reshape((self.n_channels,) + tuple(template_shape))
return self._build_warp_to_shape(
warped_pixels, transform, warp_landmarks, return_transform
)
def _build_warp_to_shape(
self, warped_pixels, transform, warp_landmarks, return_transform
):
# factored out common logic from the different paths we can take in
# warp_to_shape. Rebuilds an image post-warp, adjusting landmarks
# as necessary.
warped_image = Image(warped_pixels, copy=False)
# warp landmarks if requested.
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def rescale(
self, scale, round="ceil", order=1, warp_landmarks=True, return_transform=False
):
r"""
Return a copy of this image, rescaled by a given factor.
Landmarks are rescaled appropriately.
Parameters
----------
scale : `float` or `tuple` of `floats`
The scale factor. If a tuple, the scale to apply to each dimension.
If a single `float`, the scale will be applied uniformly across
each dimension.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If less scales than dimensions are provided.
If any scale is less than or equal to 0.
"""
# Pythonic way of converting to list if we are passed a single float
try:
if len(scale) < self.n_dims:
raise ValueError(
"Must provide a scale per dimension."
"{} scales were provided, {} were expected.".format(
len(scale), self.n_dims
)
)
except TypeError: # Thrown when len() is called on a float
scale = [scale] * self.n_dims
# Make sure we have a numpy array
scale = np.asarray(scale)
for s in scale:
if s <= 0:
raise ValueError("Scales must be positive floats.")
transform = NonUniformScale(scale)
        # use the scale factor to make the template shape bigger
        # while respecting the user's rounding preference.
template_shape = round_image_shape(transform.apply(self.shape), round)
# due to image indexing, we can't just apply the pseudoinverse
# transform to achieve the scaling we want though!
# Consider a 3x rescale on a 2x4 image. Looking at each dimension:
# H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
# W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
# => need to make the correct scale per dimension!
shape = np.array(self.shape, dtype=float)
# scale factors = max_index_after / current_max_index
# (note that max_index = length - 1, as 0 based)
scale_factors = (scale * shape - 1) / (shape - 1)
inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
# for rescaling we enforce that mode is nearest to avoid num. errors
return self.warp_to_shape(
template_shape,
inverse_transform,
warp_landmarks=warp_landmarks,
order=order,
mode="nearest",
return_transform=return_transform,
)
def rescale_to_diagonal(
self, diagonal, round="ceil", warp_landmarks=True, return_transform=False
):
r"""
        Return a copy of this image, rescaled so that its diagonal is a
new size.
Parameters
----------
diagonal: `int`
The diagonal size of the new image.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : type(self)
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
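        Examples
        --------
        A minimal sketch (``im`` is an assumed 2D image)::
            im_200 = im.rescale_to_diagonal(200)
            # im_200.diagonal() is now approximately 200 (the exact value
            # depends on the rounding of the new shape)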
"""
return self.rescale(
diagonal / self.diagonal(),
round=round,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_to_pointcloud(
self,
pointcloud,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rescaled so that the scale of a
particular group of landmarks matches the scale of the passed
reference pointcloud.
Parameters
----------
pointcloud: :map:`PointCloud`
The reference pointcloud to which the landmarks specified by
``group`` will be scaled to match.
group : `str`, optional
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
pc = self.landmarks[group]
scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy()
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_landmarks_to_diagonal_range(
self,
diagonal_range,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
        Return a copy of this image, rescaled so that the diagonal of the
        bounding box containing its landmarks matches the specified
        ``diagonal_range``.
Parameters
----------
        diagonal_range: ``(n_dims,)`` `ndarray`
            The diagonal range that we want the bounding box of the landmarks
            of the returned image to have.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
round : ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
x, y = self.landmarks[group].range()
scale = diagonal_range / np.sqrt(x ** 2 + y ** 2)
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def resize(self, shape, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, resized to a particular shape.
All image information (landmarks, and mask in the case of
:map:`MaskedImage`) is resized appropriately.
Parameters
----------
shape : `tuple`
The new shape to resize to.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the resize is also returned.
Returns
-------
resized_image : ``type(self)``
A copy of this image, resized.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If the number of dimensions of the new shape does not match
the number of dimensions of the image.
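        Examples
        --------
        A minimal sketch (``im`` is an assumed 2D image)::
            small = im.resize((100, 120))
            # small.shape == (100, 120)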
"""
shape = np.asarray(shape, dtype=float)
if len(shape) != self.n_dims:
raise ValueError(
"Dimensions must match."
"{} dimensions provided, {} were expected.".format(
shape.shape, self.n_dims
)
)
scales = shape / self.shape
# Have to round the shape when scaling to deal with floating point
# errors. For example, if we want (250, 250), we need to ensure that
        # we get (250, 250) even if the computed value is only 250 up to some
        # floating point inaccuracy.
return self.rescale(
scales,
round="round",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def zoom(self, scale, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, zoomed about the centre point. ``scale``
values greater than 1.0 denote zooming **in** to the image and values
less than 1.0 denote zooming **out** of the image. The size of the
image will not change, if you wish to scale an image, please see
:meth:`rescale`.
Parameters
----------
scale : `float`
``scale > 1.0`` denotes zooming in. Thus the image will appear
larger and areas at the edge of the zoom will be 'cropped' out.
``scale < 1.0`` denotes zooming out. The image will be padded
by the value of ``cval``.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the zooming is also returned.
Returns
-------
zoomed_image : ``type(self)``
A copy of this image, zoomed.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
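        Examples
        --------
        A minimal sketch (``im`` is an assumed 2D image)::
            zoomed_in = im.zoom(2.0)    # centre region enlarged, edges cropped
            zoomed_out = im.zoom(0.5)   # whole image shrunk and edge-padded
            # in both cases the shape is unchanged: zoomed_in.shape == im.shape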
"""
t = scale_about_centre(self, 1.0 / scale)
return self.warp_to_shape(
self.shape,
t,
order=order,
mode="nearest",
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rotate_ccw_about_centre(
self,
theta,
degrees=True,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rotated counter-clockwise about its centre.
Note that the `retain_shape` argument defines the shape of the rotated
image. If ``retain_shape=True``, then the shape of the rotated image
will be the same as the one of current image, so some regions will
probably be cropped. If ``retain_shape=False``, then the returned image
has the correct size so that the whole area of the current image is
included.
Parameters
----------
theta : `float`
The angle of rotation about the centre.
degrees : `bool`, optional
If ``True``, `theta` is interpreted in degrees. If ``False``,
``theta`` is interpreted as radians.
retain_shape : `bool`, optional
If ``True``, then the shape of the rotated image will be the same as
            that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the rotated image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rotation is also returned.
Returns
-------
rotated_image : ``type(self)``
The rotated image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
Image rotation is presently only supported on 2D images
"""
if self.n_dims != 2:
raise ValueError(
"Image rotation is presently only supported on " "2D images"
)
rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return self.transform_about_centre(
rotation,
retain_shape=retain_shape,
mode=mode,
cval=cval,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def transform_about_centre(
self,
transform,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, transformed about its centre.
Note that the `retain_shape` argument defines the shape of the
transformed image. If ``retain_shape=True``, then the shape of the
        transformed image will be the same as that of the current image, so some
regions will probably be cropped. If ``retain_shape=False``, then the
returned image has the correct size so that the whole area of the
current image is included.
.. note::
This method will not work for transforms that result in a transform
chain as :map:`TransformChain` is not invertible.
.. note::
            Be careful when defining transforms for warping images. All pixel
locations must fall within a valid range as expected by the
transform. Therefore, your transformation must accept 'negative'
pixel locations as the pixel locations provided to your transform
will have the object centre subtracted from them.
Parameters
----------
transform : :map:`ComposableTransform` and :map:`VInvertible` type
A composable transform. ``pseudoinverse`` will be invoked on the
resulting transform so it must implement a valid inverse.
retain_shape : `bool`, optional
If ``True``, then the shape of the sheared image will be the same as
            that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the sheared image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the shearing is also returned.
Returns
-------
transformed_image : ``type(self)``
The transformed image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Examples
--------
This is an example for rotating an image about its center. Let's
first load an image, create the rotation transform and then apply it ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Rotation
# Load image
im = mio.import_builtin_asset.lenna_png()
            # Create rotation transform
rot_tr = Rotation.init_from_2d_ccw_angle(45)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render rotated image
plt.subplot(132)
im.transform_about_centre(rot_tr).view_landmarks()
plt.title('Rotated')
            # Render rotated image that has the same shape as the original image
plt.subplot(133)
im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks()
plt.title('Rotated (Retain original shape)')
Similarly, in order to apply a shear transform ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Affine
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
shear_tr = Affine.init_from_2d_shear(25, 10)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render sheared image
plt.subplot(132)
im.transform_about_centre(shear_tr).view_landmarks()
plt.title('Sheared')
            # Render sheared image that has the same shape as the original image
plt.subplot(133)
im.transform_about_centre(shear_tr,
retain_shape=True).view_landmarks()
plt.title('Sheared (Retain original shape)')
"""
if retain_shape:
shape = self.shape
applied_transform = transform_about_centre(self, transform)
else:
# Get image's bounding box coordinates
original_bbox = bounding_box((0, 0), np.array(self.shape) - 1)
# Translate to origin and apply transform
trans = Translation(-self.centre(), skip_checks=True).compose_before(
transform
)
transformed_bbox = trans.apply(original_bbox)
# Create new translation so that min bbox values go to 0
t = Translation(-transformed_bbox.bounds()[0])
applied_transform = trans.compose_before(t)
transformed_bbox = trans.apply(original_bbox)
# Output image's shape is the range of the sheared bounding box
            # while respecting the user's rounding preference.
shape = round_image_shape(transformed_bbox.range() + 1, round)
# Warp image
return self.warp_to_shape(
shape,
applied_transform.pseudoinverse(),
order=order,
warp_landmarks=warp_landmarks,
mode=mode,
cval=cval,
return_transform=return_transform,
)
def mirror(self, axis=1, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, mirrored/flipped about a certain axis.
Parameters
----------
axis : `int`, optional
The axis about which to mirror the image.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the mirroring is also returned.
Returns
-------
mirrored_image : ``type(self)``
The mirrored image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
axis cannot be negative
ValueError
axis={} but the image has {} dimensions
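        Examples
        --------
        A minimal sketch (``im`` is an assumed 2D image)::
            flipped_lr = im.mirror(axis=1)  # flip about the vertical mid-line
            flipped_ud = im.mirror(axis=0)  # flip about the horizontal mid-line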
"""
# Check axis argument
if axis < 0:
raise ValueError("axis cannot be negative")
elif axis >= self.n_dims:
raise ValueError(
"axis={} but the image has {} " "dimensions".format(axis, self.n_dims)
)
# Create transform that includes ...
# ... flipping about the selected axis ...
rot_matrix = np.eye(self.n_dims)
rot_matrix[axis, axis] = -1
# ... and translating back to the image's bbox
tr_matrix = np.zeros(self.n_dims)
tr_matrix[axis] = self.shape[axis] - 1
# Create transform object
trans = Rotation(rot_matrix, skip_checks=True).compose_before(
Translation(tr_matrix, skip_checks=True)
)
# Warp image
return self.warp_to_shape(
self.shape,
trans.pseudoinverse(),
mode="nearest",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def pyramid(self, n_levels=3, downscale=2):
r"""
Return a rescaled pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
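        Examples
        --------
        A minimal sketch (``im`` is an assumed image)::
            levels = list(im.pyramid(n_levels=3, downscale=2))
            # levels[0] is a copy of im, levels[1] is half the size and
            # levels[2] is a quarter of the size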
"""
image = self.copy()
yield image
for _ in range(n_levels - 1):
image = image.rescale(1.0 / downscale)
yield image
def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None):
r"""
Return the gaussian pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
sigma : `float`, optional
Sigma for gaussian filter. Default is ``downscale / 3.`` which
corresponds to a filter mask twice the size of the scale factor
that covers more than 99% of the gaussian distribution.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
from menpo.feature import gaussian_filter
if sigma is None:
sigma = downscale / 3.0
image = self.copy()
yield image
for level in range(n_levels - 1):
image = gaussian_filter(image, sigma).rescale(1.0 / downscale)
yield image
def as_greyscale(self, mode="luminosity", channel=None):
r"""
Returns a greyscale version of the image. If the image does *not*
represent a 2D RGB image, then the ``luminosity`` mode will fail.
Parameters
----------
mode : ``{average, luminosity, channel}``, optional
============== =====================================================
mode Greyscale Algorithm
============== =====================================================
average Equal average of all channels
luminosity Calculates the luminance using the CCIR 601 formula:
| .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B'
channel A specific channel is chosen as the intensity value.
============== =====================================================
channel: `int`, optional
The channel to be taken. Only used if mode is ``channel``.
Returns
-------
greyscale_image : :map:`MaskedImage`
A copy of this image in greyscale.
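        Examples
        --------
        A minimal sketch (``im`` is an assumed 2D RGB image)::
            grey = im.as_greyscale(mode='luminosity')
            # grey.n_channels == 1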
"""
greyscale = self.copy()
if mode == "luminosity":
if self.n_dims != 2:
raise ValueError(
"The 'luminosity' mode only works on 2D RGB"
"images. {} dimensions found, "
"2 expected.".format(self.n_dims)
)
elif self.n_channels != 3:
raise ValueError(
"The 'luminosity' mode only works on RGB"
"images. {} channels found, "
"3 expected.".format(self.n_channels)
)
# Only compute the coefficients once.
global _greyscale_luminosity_coef
if _greyscale_luminosity_coef is None:
_greyscale_luminosity_coef = np.linalg.inv(
np.array(
[
[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.106, 1.703],
]
)
)[0, :]
# Compute greyscale via dot product
pixels = np.dot(_greyscale_luminosity_coef, greyscale.pixels.reshape(3, -1))
# Reshape image back to original shape (with 1 channel)
pixels = pixels.reshape(greyscale.shape)
elif mode == "average":
pixels = np.mean(greyscale.pixels, axis=0)
elif mode == "channel":
if channel is None:
raise ValueError(
"For the 'channel' mode you have to provide" " a channel index"
)
pixels = greyscale.pixels[channel]
else:
raise ValueError(
"Unknown mode {} - expected 'luminosity', "
"'average' or 'channel'.".format(mode)
)
        # Set new pixels - ensure channel axis and maintain the original dtype
greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype, copy=False)
return greyscale
def as_PILImage(self, out_dtype=np.uint8):
r"""
Return a PIL copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
pil_image : `PILImage`
PIL copy of image
Raises
------
ValueError
            If image is not 2D or does not have either 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 is supported.
"""
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
pixels = denormalize_pixels_range(pixels, out_dtype)
return PILImage.fromarray(pixels)
def as_imageio(self, out_dtype=np.uint8):
r"""
Return an Imageio copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
imageio_image : `ndarray`
Imageio image (which is just a numpy ndarray with the channels
as the last axis).
Raises
------
ValueError
            If image is not 2D or does not have either 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 and uint16
are supported.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .pixels_with_channels_at_back instead.",
MenpoDeprecationWarning,
)
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
return denormalize_pixels_range(pixels, out_dtype)
def pixels_range(self):
r"""
The range of the pixel values (min and max pixel values).
Returns
-------
min_max : ``(dtype, dtype)``
The minimum and maximum value of the pixels array.
"""
return self.pixels.min(), self.pixels.max()
def rolled_channels(self):
r"""
Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .pixels_with_channels_at_back() instead.",
MenpoDeprecationWarning,
)
return self.pixels_with_channels_at_back()
def pixels_with_channels_at_back(self, out_dtype=None):
r"""
Returns the pixels matrix, with the channels rolled to the back axis.
This may be required for interacting with external code bases that
require images to have channels as the last axis, rather than the
Menpo convention of channels as the first axis.
If this image is single channel, the final axis is dropped.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
rolled_channels : `ndarray`
Pixels with channels as the back (last) axis. If single channel,
the last axis will be dropped.
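        Examples
        --------
        A minimal sketch (``im`` is an assumed 3-channel 2D image)::
            arr = im.pixels_with_channels_at_back()
            # im.pixels.shape is (3, H, W) while arr.shape is (H, W, 3)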
"""
p = channels_to_back(self.pixels)
if out_dtype is not None:
p = denormalize_pixels_range(p, out_dtype=out_dtype)
return np.squeeze(p)
def __str__(self):
return "{} {}D Image with {} channel{}".format(
self._str_shape(), self.n_dims, self.n_channels, "s" * (self.n_channels > 1)
)
def has_landmarks_outside_bounds(self):
"""
Indicates whether there are landmarks located outside the image bounds.
:type: `bool`
"""
if self.has_landmarks:
for l_group in self.landmarks:
pc = self.landmarks[l_group].points
if np.any(np.logical_or(self.shape - pc < 1, pc < 0)):
return True
return False
def constrain_landmarks_to_bounds(self):
r"""
Deprecated - please use the equivalent ``constrain_to_bounds`` method
now on PointCloud, in conjunction with the new Image ``bounds()``
method. For example:
>>> im.constrain_landmarks_to_bounds() # Equivalent to below
>>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds())
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .constrain_to_bounds() instead (on PointCloud).",
MenpoDeprecationWarning,
)
for l_group in self.landmarks:
l = self.landmarks[l_group]
for k in range(l.points.shape[1]):
tmp = l.points[:, k]
tmp[tmp < 0] = 0
tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1
l.points[:, k] = tmp
self.landmarks[l_group] = l
def normalize_std(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its
pixel values have zero mean and unit variance.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_std() instead (features package).",
MenpoDeprecationWarning,
)
return self._normalize(np.std, mode=mode)
def normalize_norm(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its pixel values
have zero mean and its norm equals 1.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
unit norm.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_norm() instead (features package).",
MenpoDeprecationWarning,
)
def scale_func(pixels, axis=None):
return np.linalg.norm(pixels, axis=axis, **kwargs)
return self._normalize(scale_func, mode=mode)
def _normalize(self, scale_func, mode="all"):
from menpo.feature import normalize
return normalize(self, scale_func=scale_func, mode=mode)
def rescale_pixels(self, minimum, maximum, per_channel=True):
r"""A copy of this image with pixels linearly rescaled to fit a range.
Note that the only pixels that will be considered and rescaled are those
that feature in the vectorized form of this image. If you want to use
this routine on all the pixels in a :map:`MaskedImage`, consider
using `as_unmasked()` prior to this call.
Parameters
----------
minimum: `float`
The minimal value of the rescaled pixels
maximum: `float`
The maximal value of the rescaled pixels
per_channel: `boolean`, optional
If ``True``, each channel will be rescaled independently. If
``False``, the scaling will be over all channels.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
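        Examples
        --------
        A minimal sketch (``im`` is an assumed image with float pixels)::
            im01 = im.rescale_pixels(0.0, 1.0)
            # im01.pixels.min() == 0.0 and im01.pixels.max() == 1.0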
"""
v = self.as_vector(keep_channels=True).T
if per_channel:
min_, max_ = v.min(axis=0), v.max(axis=0)
else:
min_, max_ = v.min(), v.max()
sf = ((maximum - minimum) * 1.0) / (max_ - min_)
v_new = ((v - min_) * sf) + minimum
return self.from_vector(v_new.T.ravel())
def clip_pixels(self, minimum=None, maximum=None):
r"""A copy of this image with pixels linearly clipped to fit a range.
Parameters
----------
minimum: `float`, optional
The minimal value of the clipped pixels. If None is provided, the
default value will be 0.
maximum: `float`, optional
The maximal value of the clipped pixels. If None is provided, the
default value will depend on the dtype.
Returns
-------
        clipped_image: ``type(self)``
            A copy of this image with pixels clipped to the range provided.
"""
if minimum is None:
minimum = 0
if maximum is None:
dtype = self.pixels.dtype
if dtype == np.uint8:
maximum = 255
elif dtype == np.uint16:
maximum = 65535
elif dtype in [np.float32, np.float64]:
maximum = 1.0
else:
m1 = "Could not recognise the dtype ({}) to set the maximum."
raise ValueError(m1.format(dtype))
copy = self.copy()
copy.pixels = copy.pixels.clip(min=minimum, max=maximum)
return copy
def rasterize_landmarks(
self,
group=None,
render_lines=True,
line_style="-",
line_colour="b",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=1,
marker_face_colour="b",
marker_edge_colour="b",
marker_edge_width=1,
backend="matplotlib",
):
r"""
This method provides the ability to rasterize 2D landmarks onto the
image. The returned image has the specified landmark groups rasterized
onto the image - which is useful for things like creating result
examples or rendering videos with annotations.
Since multiple landmark groups can be specified, all arguments can take
lists of parameters that map to the provided groups list. Therefore, the
parameters must be lists of the correct length or a single parameter to
apply to every landmark group.
Multiple backends are provided, all with different strengths. The
'pillow' backend is very fast, but not very flexible. The `matplotlib`
backend should be feature compatible with other Menpo rendering methods,
but is much slower due to the overhead of creating a figure to render
into.
Parameters
----------
group : `str` or `list` of `str`, optional
The landmark group key, or a list of keys.
render_lines : `bool`, optional
If ``True``, and the provided landmark group is a
:map:`PointDirectedGraph`, the edges are rendered.
line_style : `str`, optional
The style of the edge line. Not all backends support this argument.
line_colour : `str` or `tuple`, optional
A Matplotlib style colour or a backend dependant colour.
line_width : `int`, optional
The width of the line to rasterize.
render_markers : `bool`, optional
If ``True``, render markers at the coordinates of each landmark.
marker_style : `str`, optional
A Matplotlib marker style. Not all backends support all marker
styles.
marker_size : `int`, optional
The size of the marker - different backends use different scale
            spaces so consistent output may be difficult.
marker_face_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_width : `int`, optional
The width of the marker edge. Not all backends support this.
backend : {'matplotlib', 'pillow'}, optional
The backend to use.
Returns
-------
rasterized_image : :map:`Image`
The image with the landmarks rasterized directly into the pixels.
Raises
------
ValueError
Only 2D images are supported.
ValueError
Only RGB (3-channel) or Greyscale (1-channel) images are supported.
"""
from .rasterize import rasterize_landmarks_2d
return rasterize_landmarks_2d(
self,
group=group,
render_lines=render_lines,
line_style=line_style,
line_colour=line_colour,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
backend=backend,
)
def round_image_shape(shape, round):
if round not in ["ceil", "round", "floor"]:
raise ValueError("round must be either ceil, round or floor")
    # Return a tuple so that the '+' operator means concatenate (not add)
return tuple(getattr(np, round)(shape).astype(int))
def _convert_patches_list_to_single_array(patches_list, n_center):
r"""
Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.
Note that these two are the formats returned by the `extract_patches()`
and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters
----------
patches_list : `list` of `n_center * n_offset` :map:`Image` objects
A `list` that contains all the patches as :map:`Image` objects.
n_center : `int`
The number of centers from which the patches are extracted.
Returns
-------
patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)``
The numpy array that contains all the patches.
"""
n_offsets = int(len(patches_list) / n_center)
n_channels = patches_list[0].n_channels
height = patches_list[0].height
width = patches_list[0].width
patches_array = np.empty(
(n_center, n_offsets, n_channels, height, width),
dtype=patches_list[0].pixels.dtype,
)
total_index = 0
for p in range(n_center):
for o in range(n_offsets):
patches_array[p, o, ...] = patches_list[total_index].pixels
total_index += 1
return patches_array
def _create_patches_image(
patches, patch_centers, patches_indices=None, offset_index=None, background="black"
):
r"""
Creates an :map:`Image` object in which the patches are located on the
correct regions based on the centers. Thus, the image is a block-sparse
    matrix. It also has an attached `patch_centers` :map:`PointCloud`
object with the centers that correspond to the patches that the user
selected to set.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods of the :map:`Image` class. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that are
returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
patches_indices : `int` or `list` of `int` or ``None``, optional
Defines the patches that will be set (copied) to the image. If ``None``,
then all the patches are copied.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the index
of the second dimension from which to sample. If ``None``, then ``0`` is
used.
background : ``{'black', 'white'}``, optional
If ``'black'``, then the background is set equal to the minimum value
of `patches`. If ``'white'``, then the background is set equal to the
maximum value of `patches`.
Returns
-------
patches_image : :map:`Image`
The output patches image object.
Raises
------
ValueError
        Background must be either ``'black'`` or ``'white'``.
"""
# If patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
# Parse inputs
if offset_index is None:
offset_index = 0
if patches_indices is None:
patches_indices = np.arange(patches.shape[0])
elif not isinstance(patches_indices, Iterable):
patches_indices = [patches_indices]
# Compute patches image's shape
n_channels = patches.shape[2]
patch_shape0 = patches.shape[3]
patch_shape1 = patches.shape[4]
top, left = np.min(patch_centers.points, 0)
bottom, right = np.max(patch_centers.points, 0)
min_0 = np.floor(top - patch_shape0)
min_1 = np.floor(left - patch_shape1)
max_0 = np.ceil(bottom + patch_shape0)
max_1 = np.ceil(right + patch_shape1)
height = max_0 - min_0 + 1
width = max_1 - min_1 + 1
# Translate the patch centers to fit in the new image
new_patch_centers = patch_centers.copy()
new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]])
# Create new image with the correct background values
if background == "black":
patches_image = Image.init_blank(
(height, width),
n_channels,
fill=np.min(patches[patches_indices]),
dtype=patches.dtype,
)
elif background == "white":
patches_image = Image.init_blank(
(height, width),
n_channels,
fill=np.max(patches[patches_indices]),
dtype=patches.dtype,
)
else:
raise ValueError("Background must be either " "black" " or " "white" ".")
# If there was no slicing on the patches, then attach the original patch
# centers. Otherwise, attach the sliced ones.
if set(patches_indices) == set(range(patches.shape[0])):
patches_image.landmarks["patch_centers"] = new_patch_centers
else:
tmp_centers = PointCloud(new_patch_centers.points[patches_indices])
patches_image.landmarks["patch_centers"] = tmp_centers
# Set the patches
return patches_image.set_patches_around_landmarks(
patches[patches_indices], group="patch_centers", offset_index=offset_index
)
| bsd-3-clause |
hamogu/marxs | marxs/visualization/utils.py | 1 | 10016 | # Licensed under GPL version 3 - see LICENSE.rst
'''This module collects helper functions for visualization that are of use for several backends.
The functions here are not intended to be called directly by the user. Instead, they
factor out common tasks that are used in several visualization backends.
'''
from __future__ import division
import warnings
import numpy as np
class MARXSVisualizationWarning(Warning):
'''Warning class for MARXS objects missing from plotting'''
pass
def get_obj_name(obj):
'''Return printable name for objects or functions.'''
if hasattr(obj, 'name'):
return obj.name
elif hasattr(obj, 'func_name'):
return obj.func_name
else:
return str(obj)
class DisplayDict(dict):
'''A dictionary to store how an element is displayed in plotting.
A dictionary of this type works just like a normal dictionary, except for an
additional look-up step for keys that are not found in the dictionary itself.
A ``DisplayDict`` is initialized with a reference to the object it describes
    and any parameter accessed from a ``DisplayDict`` that is not found in the
    dictionary will be looked up on the object's geometry. This allows us to set
    any and all display settings in the ``DisplayDict`` to customize plotting in any
way, but for those values that are not set, fall back to the settings of the
geometry (e.g. the shape of an object is typically taken from the geometry,
while the color is not).
Parameters
----------
parent : `marxs.base.MarxsElement`
Reference to the object that is described by this ``DisplayDict``
args, kwargs: see `dict`
'''
def __init__(self, parent, *args, **kwargs):
self.parent = parent
super(DisplayDict, self).__init__(*args, **kwargs)
def __getitem__(self, key):
if (key not in self) and hasattr(self.parent, 'geometry'):
try:
return getattr(self.parent.geometry, key)
except AttributeError:
raise KeyError(key)
else:
return super(DisplayDict, self).__getitem__(key)
def get(self, k, d=None):
'''D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'''
try:
return self[k]
        except KeyError:
            return d
def plot_object_general(plot_registry, obj, display=None, **kwargs):
'''Look up a plotting routine for an object and execute it.
This function is not meant to be called directly by the user, instead, it
is designed to simplify the implementation of new plotting backends.
Parameters
----------
plot_registry : dict
Keys are the names of the shape of an object and values in this
dictionary are functions that know how to plot this type of shape. The
appropriate plotting function is then called with the input `obj`,
`display` and any other keyword arguments.
If the shape is ``"None"`` (as a string), no plotting function is
called.
obj : `marxs.base.MarxsElement`
The element that should be plotted.
    display : dict or None
Dictionary with display settings. If this is ``None``, ``obj.display``
is used. If that is also ``None`` then the objects is skipped.
kwargs : other keyword arguments
These arguments are just passed through to the plotting function.
Returns
-------
out : different
The output from the plotting function that was executed is passed
through. Different plotting backends return different kinds of output.
'''
if display is None:
if hasattr(obj, 'display') and (obj.display is not None):
display = obj.display
else:
warnings.warn('Skipping {0}: No display dictionary found.'.format(get_obj_name(obj)),
MARXSVisualizationWarning)
return None
try:
shape = display['shape']
except KeyError:
warnings.warn('Skipping {0}: "shape" not set in display dict.'.format(get_obj_name(obj)),
MARXSVisualizationWarning)
return None
shapes = [s.strip() for s in shape.split(';')]
for s in shapes:
if s == 'None':
return None
elif s in plot_registry:
# turn into valid color tuple
display['color'] = get_color(display)
return plot_registry[s](obj, display, **kwargs)
else:
warnings.warn('Skipping {0}: No function to plot {1}.'.format(get_obj_name(obj), shape),
MARXSVisualizationWarning)
return None
def get_color(d):
'''Look for color information in dictionary.
If missing, return white.
This function checks if the `d['color']` is a valid RGB tuple and if
not it imports a `matplotlib.colors.ColorConverter` to convert any
matplotlib compatible string to an RGB tuple.
Parameters
----------
d : dict
Color information should be present in ``d['color']``.
Returns
-------
color : tuple
RGB tuple with each element in the range 0..1
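    Examples
    --------
    A short illustration (the values follow directly from the rules above):
    >>> get_color({'color': (0.0, 1.0, 0.0)})
    (0.0, 1.0, 0.0)
    >>> get_color({})  # no color information - default to white
    (1.0, 1.0, 1.0)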
'''
if 'color' not in d:
return (1., 1., 1.)
else:
c = d['color']
        # check if this is a tuple of three floats in the range 0..1
# or can be converted to one
try:
cout = tuple(c)
if len(cout) != 3:
raise TypeError
for a in cout:
if not isinstance(a, float) or (a < 0.) or (a > 1.):
raise TypeError
return cout
except TypeError:
# It's a hex or string. Let matplotlib deal with that.
import matplotlib.colors
return matplotlib.colors.colorConverter.to_rgb(c)
def color_tuple_to_hex(color):
'''Convert color tuple to hex string.
Parameters
----------
color : tuple
        tuple has three elements (rgb) that are floats between 0 and 1
or ints between 0 and 255.
Returns
-------
hexstring : string
string encoding that number as hex
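    Examples
    --------
    >>> color_tuple_to_hex((255, 255, 255))
    '0xffffff'
    >>> color_tuple_to_hex((1.0, 0.0, 0.0))
    '0xff0000'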
'''
if all([isinstance(a, float) for a in color]):
if any(i < 0. for i in color) or any(i > 1. for i in color):
raise ValueError('Float values in color tuple must be between 0 and 1.')
out = hex(int(color[0] * 256**2 * 255 + color[1] * 256 * 255 + color[2] * 255))
elif all([isinstance(a, int) for a in color]):
if any(i < 0 for i in color) or any(i > 255 for i in color):
raise ValueError('Int values in color tuple must be between 0 and 255.')
out = hex(color[0] * 256**2 + color[1] * 256 + color[2])
else:
raise ValueError('Input tuple must be all float or all int.')
# Now pad with zeros if required
return out[:2] + out[2:].zfill(6)
def plane_with_hole(outer, inner):
'''Triangulation of a plane with an inner hole
This function constructs a triangulation for a plane with an inner hole, e.g.
a rectangular plane where an inner circle is cut out.
Parameters
----------
outer, inner : np.ndarray of shape (n, 3)
Coordinates in x,y,z of points that define the inner and outer
boundary. ``outer`` and ``inner`` can have a different number of
points, but points need to be listed in the same orientation
(e.g. clockwise) for both and the starting points need to have a
similar angle as seen from the center (e.g. for a plane with z=0, both
    ``outer`` and ``inner`` could list a point close to the y-axis first).
Returns
-------
xyz : nd.array
stacked ``outer`` and ``inner``.
triangles : nd.array
List of the indices. Each row has the index of three points in ``xyz``.
Examples
--------
In this example, we make a square and cut out a smaller square in the middle.
>>> import numpy as np
>>> from marxs.visualization.utils import plane_with_hole
>>> outer = np.array([[-1, -1, 1, 1], [-1, 1, 1, -1], [0,0,0,0]]).T
>>> inner = 0.5 * outer
>>> xyz, triangles = plane_with_hole(outer, inner)
>>> triangles
array([[0, 4, 5],
[0, 1, 5],
[1, 5, 6],
[1, 2, 6],
[2, 6, 7],
[2, 3, 7],
[3, 7, 4],
[3, 0, 4]])
'''
n_out = outer.shape[0]
n_in = inner.shape[0]
n = n_out + n_in
triangles = np.zeros((n, 3), dtype=int)
xyz = np.vstack([outer, inner])
i_in = 0
i_out = 0
for i in range(n_out + n_in):
if i/n >= i_in/n_in:
triangles[i, :] = [i_out, n_out + i_in, n_out + ((i_in + 1) % n_in)]
i_in += 1
else:
triangles[i, :] = [i_out, (i_out + 1) % n_out, n_out + (i_in % n_in)]
i_out = (i_out + 1) % n_out
return xyz, triangles
def combine_disjoint_triangulations(list_xyz, list_triangles):
'''Combine two disjoint triangulations into one set of points
This function combines two entirely separate triangulations into one set
of point and triangles. Plotting the combined triangulation should have the
same effect as plotting each triangulation separately. This function is used
for plotting apertures where we have e.g. an open ring. This can be plotted
as an inner circle plus an outer shape with a hole in it.
Parameters
----------
list_xyz : list of `np.array`
Each array holds xyz values for one triangulation
list_triangles : list of nd.array
Each array holds the list of the indices for one triangulation.
Returns
-------
xyz : nd.array
stacked points of all input triangulations.
triangles : nd.array
List of the indices. Each row has the index of three points in ``xyz``.
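Examples
--------
A minimal sketch with two copies of the same dummy triangle; the point
coordinates are placeholders and only the index offsets matter here.
>>> import numpy as np
>>> from marxs.visualization.utils import combine_disjoint_triangulations
>>> pts = np.zeros((3, 3))
>>> tri = np.array([[0, 1, 2]])
>>> xyz, triangles = combine_disjoint_triangulations([pts, pts], [tri, tri])
>>> triangles
array([[0, 1, 2],
[3, 4, 5]])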
'''
xyz = np.vstack(list_xyz)
n_offset = np.cumsum([a.shape[0] for a in list_xyz])
n_offset -= n_offset[0]
triangles = np.vstack([list_triangles[i] + n_offset[i] for i in range(len(n_offset))])
return xyz, triangles
| gpl-3.0 |
luseiee/machineLearningWithSpark | chapter03/movielens_analysis.py | 1 | 3783 | ### $SPARK_HOME/bin/spark-submit movielens_analysis.py
### Data visualization of movielens dataset.
from pyspark import SparkContext
import matplotlib.pyplot as plt
import numpy as np
sc = SparkContext("local", "Movielens Analysis")
sc.setLogLevel("ERROR")
PATH = "/Users/c/xueshu/bigdata/machineLearningWithSpark"
## 1. Do some statistics
user_data = sc.textFile("%s/ml-100k/u.user" % PATH)
user_fields = user_data.map(lambda line: line.split('|'))
num_users = user_fields.count()
num_genders = user_fields.map(lambda fields: fields[2]).distinct().count()
num_occupations = user_fields.map(lambda fields: fields[3]).distinct().count()
num_zipcodes = user_fields.map(lambda fields: fields[4]).distinct().count()
print("Users:%d, genders:%d, occupations:%d, ZIP codes:%d"
%(num_users, num_genders, num_occupations, num_zipcodes))
## 2. Draw histograms of age
ages = user_fields.map(lambda fields: int(fields[1])).collect()
fig1 = plt.figure()
plt.hist(ages, bins = 20, edgecolor='black')
plt.title("Age histogram")
plt.xlabel("Age")
plt.ylabel("Number")
## 3. Draw job distribution
occupations = user_fields.map(lambda
fields: (fields[3], 1)).reduceByKey(lambda
x, y: x + y).sortBy(lambda x: x[1]).collect()
fig2 = plt.figure(figsize=(9, 5), dpi=100)
x_axis = [occu[0] for occu in occupations]
y_axis = [occu[1] for occu in occupations]
pos = np.arange(len(x_axis))
width = 1.0
ax = plt.axes()
ax.set_xticks(pos + 0.5)
ax.set_xticklabels(x_axis)
plt.bar(pos, y_axis, width, edgecolor='black')
plt.xticks(rotation=30)
plt.ylabel("Number")
plt.title("Job distribution")
## 4. Draw movie year distribution
movie_data = sc.textFile("%s/ml-100k/u.item" % PATH)
movie_fields = movie_data.map(lambda line: line.split('|'))
num_movies = movie_fields.count()
def get_year(fields):
try:
return int(fields[2][-4:])
except:
return 1900
movie_years = movie_fields.map(get_year)
movie_ages = movie_years.filter(lambda
year: year != 1900).map(lambda year: 1998 - year).countByValue()
y_axis = movie_ages.values()
x_axis = movie_ages.keys()
fig3 = plt.figure()
plt.bar(x_axis, y_axis, edgecolor='black')
plt.title("Movie age distribution")
plt.xlabel("Movie age")
plt.ylabel("Number")
## 5. Do statistics on the rating data
rating_data = sc.textFile("%s/ml-100k/u.data" % PATH)
rating_fields = rating_data.map(lambda line: line.split('\t'))
ratings = rating_fields.map(lambda fields: int(fields[2]))
num_ratings = ratings.count()
min_rating = ratings.reduce(lambda x, y: min(x, y))
max_rating = ratings.reduce(lambda x, y: max(x, y))
mean_rating = ratings.reduce(lambda x, y: x + y) / float(num_ratings)
median_rating = np.median(ratings.collect())
ratings_per_user = num_ratings / float(num_users)
ratings_per_movie = num_ratings / float(num_movies)
print("Min rating:%d, max rating:%d, average rating:%.2f, median rating:%d"
%(min_rating, max_rating, mean_rating, median_rating))
print("Average # of rating per user: %.1f" %(ratings_per_user))
print("Average # of rating per movie: %.1f" %(ratings_per_movie))
## 6. Draw movie rating distribution
ratings_count = ratings.countByValue()
x_axis = ratings_count.keys()
y_axis = ratings_count.values()
fig4 = plt.figure()
plt.bar(x_axis, y_axis, edgecolor='black')
plt.title("Movie ratings distribution")
plt.xlabel("Movie rate")
plt.ylabel("Number")
## 7. User rating number distribution
user_rating_number = rating_fields.map(lambda
fields: (int(fields[0]), 1)).reduceByKey(lambda
x, y: x + y).sortBy(lambda x: x[1], ascending=False)
x_axis = np.arange(num_users)
y_axis = user_rating_number.values().collect()
fig5 = plt.figure(figsize=(9, 5), dpi=100)
plt.bar(x_axis, y_axis)
plt.title("User rating numbers rank")
plt.xlabel("Number of movie rated")
plt.ylabel("User")
plt.show() | mit |
kayarre/Tools | hist/process_case.py | 1 | 8337 | # import pickle
# import pyvips
import os
import pandas as pd
# import numpy as np
import SimpleITK as sitk
import networkx as nx
import pickle
import copy
# import itk
import matplotlib.pyplot as plt
# from ipywidgets import interact, fixed
# from IPython.display import clear_output
import logging
logging.basicConfig(level=logging.WARNING)
# logging.basicConfig(level=logging.DEBUG)
# from read_vips import parse_vips
from utils import get_additional_info
from utils import _calculate_composite
from utils import read_tiff_image
from utils import get_mean_edges
from utils import resample_rgb
from stage_1_registration import stage_1_transform
from stage_1_parallel import stage_1_parallel_metric
from stage_1b_registration import stage_1b_transform
from stage_1c_registration import stage_1c_transform
from stage_2_registration import stage_2_transform
from stage_3_registration import stage_3_transform
def main():
# this register the cropped images
# df_path = "/Volumes/SD/caseFiles/vwi_proj/process_df.pkl"
# crop_dir = '/Volumes/SD/caseFiles/vwi_proc'
# csv = pd.read_csv(os.path.join(crop_dir, "case_1.csv"))
# df = pd.read_pickle(os.path.join(crop_dir, "case_1.pkl"))
case_file = "case_1.pkl"
top_dir = "/Volumes/SD/caseFiles"
#top_dir = "/media/store/krs/caseFiles"
# top_dir = "/media/sansomk/510808DF6345C808/caseFiles"
df = pd.read_pickle(os.path.join(top_dir, case_file))
# print(df.head())
#relabel_paths = True
#in_dir = "vwi_proj"
#out_dir = "vwi_proc"
trans_dir = "vwi_trans"
image_dir = "images"
test_dir = "test"
resample_dir = "resample"
#mask_dir = "masks"
#print(df.head())
#print(df.columns)
# print(df["Image_ID"].values.dtype)
# study_id = "1"
# test_reg = register_series()
# this is the registration loop
reference_index = 0
reg_n = {}
epsilon = 2
lambda_ = 1.0
bad_keys = {(23,21) : 1.030835089459151, (10,11) : 4.908738521234052 } #, (11,10) :]
# create a new graph
G = nx.DiGraph()
n_rows = len(df.index)
init_max = 256
stage_1_max = 512
elstix_max = 1024
for i in range(n_rows):
if (i in G):
f_r = G.nodes[i]['row_data']
f_pg_info = G.nodes[i]['page_data']
else:
# this is the fixed image
f_r = df.iloc[i] # the data
f_pg_info = get_additional_info(f_r)
G.add_node(i, row_data=f_r, page_data=f_pg_info)
for j in range(i - epsilon, i + epsilon):
# this should keep from registring with the same image
if (j > 0) and (j < n_rows) and (j != i):
# this is the moving image
if (j in G):
t_r = G.nodes[j]['row_data']
t_pg_info = G.nodes[j]['page_data']
else:
# this is the fixed image
t_r = df.iloc[j] # the data
t_pg_info = get_additional_info(t_r)
G.add_node(j, row_data=t_r, page_data=t_pg_info)
# this is the key for the (fixed, moving) registration pair
reg_key = (i, j)
reg_n[reg_key] = dict(
f_row=f_r, t_row=t_r, f_page=f_pg_info, t_page=t_pg_info
)
print(reg_key)
# if (reg_key not in [(10,11), (23,21)]):
# continue
initial_params, fig_list = stage_1_parallel_metric(
reg_dict=reg_n[reg_key], n_max=init_max
)
#print(initial_params)
max_pix = copy.deepcopy(init_max)
for fig_dict in fig_list:
for key, fig in fig_dict.items():
fig_name = os.path.join(test_dir, "fig_init_{0}_{1}_{2}_{3}.png".format(i, j, key, int(max_pix)))
fig_path = os.path.join(top_dir, fig_name)
fig.savefig(fig_path)
plt.close(fig)
max_pix /=2
# print(initial_params)
#print(initial_params)
#print(init_params_flip)
print(initial_params["best_metric"], initial_params["best_angle"],
initial_params["best_metric_type"])
#print(init_params_flip["best_metric"], init_params_flip["best_angle"])
# quit()
if (reg_key in bad_keys.keys()):
initial_params["best_angle"] = bad_keys[reg_key]
best_reg_s1, rigid_fig = stage_1_transform(
reg_dict=reg_n[reg_key], n_max=stage_1_max, init_params=initial_params
)
# print(
# best_reg_s1["transform"]
# )
best_reg_s1b, affine_fig = stage_1b_transform(
reg_dict=reg_n[reg_key], n_max=elstix_max, initial_transform=best_reg_s1
)
# print(
# best_reg_s1b["transform"]
# )
# if (reg_key in bad_keys.keys()):
# best_reg_s1b, affine_fig = stage_1b_transform(
# reg_dict=reg_n[reg_key], n_max=elstix_max, initial_transform=best_reg_s1
# )
# else:
# best_reg_s1b, affine_fig = stage_1c_transform(
# reg_dict=reg_n[reg_key], n_max=elstix_max, initial_transform=best_reg_s1
# )
fig_name = os.path.join(image_dir, "fig_rigid_{0}_{1}.png".format(i,j))
fig_path = os.path.join(top_dir, fig_name)
rigid_fig.savefig(fig_path)
plt.close(rigid_fig)
fig_name = os.path.join(image_dir, "fig_affine_{0}_{1}.png".format(i,j))
fig_path = os.path.join(top_dir, fig_name)
affine_fig.savefig(fig_path)
plt.close(affine_fig)
affine_name = os.path.join(trans_dir, "affine_{0}_{1}.h5".format(i,j))
transform_path = os.path.join(top_dir, affine_name)
sitk.WriteTransform(best_reg_s1["transform"], transform_path)
abs_ij = abs(i-j)
# this is the metric from the possum framework
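# e.g. with illustrative numbers: measure=0.5, |i-j|=2, lambda_=1.0 gives
# weight = (1.0 + 0.5) * 2 * (1.0 + 1.0)**2 = 12.0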
weight = (1.0 + best_reg_s1b["measure"]) * abs_ij * (1.0 + lambda_)**(abs_ij)
G.add_edge(i, j, weight = weight,
measure = best_reg_s1["measure"],
transform = best_reg_s1["transform"],
tiff_page = best_reg_s1["tiff_page"],
transform_file_name = affine_name )
# best_reg_s2 = stage_2_transform(reg_dict=reg_n[-1], n_max=512, initial_transform=best_reg_s1b)
# best_reg_s3 = stage_3_transform(reg_dict=reg_n[-1], n_max=2048, initial_transform=best_reg_s2)
#print(best_reg_s1)
#print()
#print(best_reg_s1b)
#quit()
#best_reg_s2 = stage_2_transform(reg_dict=reg_n[-1], n_max=512, initial_transform=best_reg_s1b)
#best_reg_s3 = stage_3_transform(reg_dict=reg_n[-1], n_max=2048, initial_transform=best_reg_s2)
#print(best_reg_s1["measure"], best_reg_s1b["measure"])
# save the registration data
reg_path = os.path.join(top_dir, case_file.split(".")[0] + "_reg_data.pkl" )
with open(reg_path, 'wb') as f:
pickle.dump(reg_n, f)
# remove transforms in case they can't be pickled
new_G = G.copy()
for n1, n2, d in new_G.edges(data=True):
for att in ["transform"]:
nothing = d.pop(att, None)
pickle_path2 = os.path.join(top_dir, case_file.split(".")[0] + "_2.gpkl" )
nx.write_gpickle(new_G, pickle_path2)
pickle_path = os.path.join(top_dir, case_file.split(".")[0] + ".gpkl" )
try:
nx.write_gpickle(G, pickle_path)
except Exception as e:
#except pickle.PicklingError as e:
print(" Cannot pickle this thing {0}".format(e))
# TODO make another script that can generate this from saved transforms and graph
for j in range(n_rows):
# j is the moving image
trans_list = _calculate_composite(G, reference_index, j)
# Instantiate composite transform which will handle all the partial
# transformations.
composite_transform = sitk.Transform(2, sitk.sitkEuler )
# Fill the composite transformation with the partial transformations:
for transform in trans_list:
composite_transform.AddTransform(transform)
reg_key = (reference_index, j)
if (reg_key in reg_n.keys()):
f_sitk, t_sitk = read_tiff_image(reg_n[reg_key], page_index = 4)
new_image = resample_rgb(composite_transform,
f_sitk,
t_sitk,
mean = get_mean_edges(t_sitk)
)
resample_image = os.path.join(top_dir, resample_dir, "resample_affine_{0}.png".format(j))
writer = sitk.ImageFileWriter()
writer.SetFileName(resample_image)
writer.Execute(new_image)
if __name__ == "__main__":
main()
| bsd-2-clause |
nhejazi/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 18 | 48928 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors.base import VALID_METRICS_SPARSE, VALID_METRICS
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
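# Illustrative check: _weight_func(np.array([2.])) gives array([0.25]),
# which matches 2. ** -2.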
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_valid_brute_metric_for_auto_algorithm():
X = rng.rand(12, 12)
Xcsr = csr_matrix(X)
# check that there is a metric that is valid for brute
# but not ball_tree (so we actually test something)
assert_in("cosine", VALID_METRICS['brute'])
assert_false("cosine" in VALID_METRICS['ball_tree'])
# Metrics which don't require any additional parameter
require_params = ['mahalanobis', 'wminkowski', 'seuclidean']
for metric in VALID_METRICS['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(X)
nn.kneighbors(X)
elif metric == 'precomputed':
X_precomputed = rng.random_sample((10, 4))
Y_precomputed = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')
DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed,
metric='euclidean')
nb_p = neighbors.NearestNeighbors(n_neighbors=3)
nb_p.fit(DXX)
nb_p.kneighbors(DYX)
for metric in VALID_METRICS_SPARSE['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(Xcsr)
nn.kneighbors(Xcsr)
# Metric with parameter
VI = np.dot(X, X.T)
list_metrics = [('seuclidean', dict(V=rng.rand(12))),
('wminkowski', dict(w=rng.rand(12))),
('mahalanobis', dict(VI=VI))]
for metric, params in list_metrics:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric,
metric_params=params).fit(X)
nn.kneighbors(X)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/_cm.py | 6 | 67361 | """
Nothing here but dictionaries for generating LinearSegmentedColormaps,
and a dictionary of these dictionaries.
Documentation for each is in pyplot.colormaps(). Please update this
with the purpose and type of your colormap if you add data for one here.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.cbook import warn_deprecated
import numpy as np
_binary_data = {
'red': ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue': ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),
(0.746032, 0.652778, 0.652778),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.365079, 0.444444, 0.444444),
(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),
(0.809524, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),
(1.0, 0.4975, 0.4975))}
_flag_data = {
'red': lambda x: 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5,
'green': lambda x: np.sin(x * 31.5 * np.pi),
'blue': lambda x: 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5,
}
_prism_data = {
'red': lambda x: 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67,
'green': lambda x: 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33,
'blue': lambda x: -1.1 * np.sin((x * 20.9) * np.pi),
}
def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0):
"""Return custom data dictionary of (r,g,b) conversion functions, which
can be used with :func:`register_cmap`, for the cubehelix color scheme.
Unlike most other color schemes cubehelix was designed by D.A. Green to
be monotonically increasing in terms of perceived brightness.
Also, when printed on a black and white postscript printer, the scheme
results in a greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b values produced
can be visualised as a squashed helix around the diagonal in the
r,g,b color cube.
For a unit color cube (i.e. 3-D coordinates for r,g,b each in the
range 0 to 1) the color scheme starts at (r,g,b) = (0,0,0), i.e. black,
and finishes at (r,g,b) = (1,1,1), i.e. white. For some fraction *x*,
between 0 and 1, the color is the corresponding grey value at that
fraction along the black to white diagonal (x,x,x) plus a color
element. This color element is calculated in a plane of constant
perceived intensity and controlled by the following parameters.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
gamma gamma factor to emphasise either low intensity values
(gamma < 1), or high intensity values (gamma > 1);
defaults to 1.0.
s the start color; defaults to 0.5 (i.e. purple).
r the number of r,g,b rotations in color that are made
from the start to the end of the color scheme; defaults
to -1.5 (i.e. -> B -> G -> R -> B).
h the hue parameter which controls how saturated the
colors are. If this parameter is zero then the color
scheme is purely a greyscale; defaults to 1.0.
========= =======================================================
"""
def get_color_function(p0, p1):
def color(x):
# Apply gamma factor to emphasise low or high intensity values
xg = x ** gamma
# Calculate amplitude and angle of deviation from the black
# to white diagonal in the plane of constant
# perceived intensity.
a = h * xg * (1 - xg) / 2
phi = 2 * np.pi * (s / 3 + r * x)
return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
return color
return {
'red': get_color_function(-0.14861, 1.78277),
'green': get_color_function(-0.29227, -0.90649),
'blue': get_color_function(1.97294, 0.0),
}
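# Usage sketch (illustrative only; nothing below is defined in this module):
# the returned dict can be fed straight to a LinearSegmentedColormap, e.g.
# from matplotlib.colors import LinearSegmentedColormap
# my_helix = LinearSegmentedColormap('my_cubehelix', cubehelix(gamma=0.8, h=1.2))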
_cubehelix_data = cubehelix()
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
# Gnuplot palette functions
gfunc = {
0: lambda x: 0,
1: lambda x: 0.5,
2: lambda x: 1,
3: lambda x: x,
4: lambda x: x ** 2,
5: lambda x: x ** 3,
6: lambda x: x ** 4,
7: lambda x: np.sqrt(x),
8: lambda x: np.sqrt(np.sqrt(x)),
9: lambda x: np.sin(x * np.pi / 2),
10: lambda x: np.cos(x * np.pi / 2),
11: lambda x: np.abs(x - 0.5),
12: lambda x: (2 * x - 1) ** 2,
13: lambda x: np.sin(x * np.pi),
14: lambda x: np.abs(np.cos(x * np.pi)),
15: lambda x: np.sin(x * 2 * np.pi),
16: lambda x: np.cos(x * 2 * np.pi),
17: lambda x: np.abs(np.sin(x * 2 * np.pi)),
18: lambda x: np.abs(np.cos(x * 2 * np.pi)),
19: lambda x: np.abs(np.sin(x * 4 * np.pi)),
20: lambda x: np.abs(np.cos(x * 4 * np.pi)),
21: lambda x: 3 * x,
22: lambda x: 3 * x - 1,
23: lambda x: 3 * x - 2,
24: lambda x: np.abs(3 * x - 1),
25: lambda x: np.abs(3 * x - 2),
26: lambda x: (3 * x - 1) / 2,
27: lambda x: (3 * x - 2) / 2,
28: lambda x: np.abs((3 * x - 1) / 2),
29: lambda x: np.abs((3 * x - 2) / 2),
30: lambda x: x / 0.32 - 0.78125,
31: lambda x: 2 * x - 0.84,
32: lambda x: gfunc32(x),
33: lambda x: np.abs(2 * x - 0.5),
34: lambda x: 2 * x,
35: lambda x: 2 * x - 0.5,
36: lambda x: 2 * x - 1.
}
def gfunc32(x):
ret = np.zeros(len(x))
m = (x < 0.25)
ret[m] = 4 * x[m]
m = (x >= 0.25) & (x < 0.92)
ret[m] = -2 * x[m] + 1.84
m = (x >= 0.92)
ret[m] = x[m] / 0.08 - 11.5
return ret
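# Illustrative spot-check of the three branches above:
# gfunc32(np.array([0.1, 0.5, 0.95])) is roughly array([0.4, 0.84, 0.375])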
_gnuplot_data = {
'red': gfunc[7],
'green': gfunc[5],
'blue': gfunc[15],
}
_gnuplot2_data = {
'red': gfunc[30],
'green': gfunc[31],
'blue': gfunc[32],
}
_ocean_data = {
'red': gfunc[23],
'green': gfunc[28],
'blue': gfunc[3],
}
_afmhot_data = {
'red': gfunc[34],
'green': gfunc[35],
'blue': gfunc[36],
}
_rainbow_data = {
'red': gfunc[33],
'green': gfunc[13],
'blue': gfunc[10],
}
_seismic_data = (
(0.0, 0.0, 0.3), (0.0, 0.0, 1.0),
(1.0, 1.0, 1.0), (1.0, 0.0, 0.0),
(0.5, 0.0, 0.0))
_terrain_data = (
(0.00, (0.2, 0.2, 0.6)),
(0.15, (0.0, 0.6, 1.0)),
(0.25, (0.0, 0.8, 0.4)),
(0.50, (1.0, 1.0, 0.6)),
(0.75, (0.5, 0.36, 0.33)),
(1.00, (1.0, 1.0, 1.0)))
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),
(0.365079, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.746032, 0.000000, 0.000000),
(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),
(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),
(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),
(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),
(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),
(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),
(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),
(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89, 1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125, 0, 0), (0.375, 1, 1), (0.64, 1, 1),
(0.91, 0, 0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1),
(0.65, 0, 0), (1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),
(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),
(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),
(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),
(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),
(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),
(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),
(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),
(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),
(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),
(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),
(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),
(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),
(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),
(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),
(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),
(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),
(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),
(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),
(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),
(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),
(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),
(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),
(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),
(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),
(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),
(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),
(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),
(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),
(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),
(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),
(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),
(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),
(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),
(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),
(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),
(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),
(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),
(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),
(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),
(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),
(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),
(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),
(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),
(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),
(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),
(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),
(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),
(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),
(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),
(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),
(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),
(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),
(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),
(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),
(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),
(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),
(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),
(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),
(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),
(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),
(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),
(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),
(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),
(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),
(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),
(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),
(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),
(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),
(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),
(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),
(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))}
_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)),
'green': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))}
_nipy_spectral_data = {
'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
}
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
# RGB values taken from Brewer's Excel sheet, divided by 255
_Blues_data = (
(0.96862745098039216, 0.98431372549019602, 1.0 ),
(0.87058823529411766, 0.92156862745098034, 0.96862745098039216),
(0.77647058823529413, 0.85882352941176465, 0.93725490196078431),
(0.61960784313725492, 0.792156862745098 , 0.88235294117647056),
(0.41960784313725491, 0.68235294117647061, 0.83921568627450982),
(0.25882352941176473, 0.5725490196078431 , 0.77647058823529413),
(0.12941176470588237, 0.44313725490196076, 0.70980392156862748),
(0.03137254901960784, 0.31764705882352939, 0.61176470588235299),
(0.03137254901960784, 0.18823529411764706, 0.41960784313725491)
)
_BrBG_data = (
(0.32941176470588235, 0.18823529411764706, 0.0196078431372549 ),
(0.5490196078431373 , 0.31764705882352939, 0.0392156862745098 ),
(0.74901960784313726, 0.50588235294117645, 0.17647058823529413),
(0.87450980392156863, 0.76078431372549016, 0.49019607843137253),
(0.96470588235294119, 0.90980392156862744, 0.76470588235294112),
(0.96078431372549022, 0.96078431372549022, 0.96078431372549022),
(0.7803921568627451 , 0.91764705882352937, 0.89803921568627454),
(0.50196078431372548, 0.80392156862745101, 0.75686274509803919),
(0.20784313725490197, 0.59215686274509804, 0.5607843137254902 ),
(0.00392156862745098, 0.4 , 0.36862745098039218),
(0.0 , 0.23529411764705882, 0.18823529411764706)
)
_BuGn_data = (
(0.96862745098039216, 0.9882352941176471 , 0.99215686274509807),
(0.89803921568627454, 0.96078431372549022, 0.97647058823529409),
(0.8 , 0.92549019607843142, 0.90196078431372551),
(0.6 , 0.84705882352941175, 0.78823529411764703),
(0.4 , 0.76078431372549016, 0.64313725490196083),
(0.25490196078431371, 0.68235294117647061, 0.46274509803921571),
(0.13725490196078433, 0.54509803921568623, 0.27058823529411763),
(0.0 , 0.42745098039215684, 0.17254901960784313),
(0.0 , 0.26666666666666666, 0.10588235294117647)
)
_BuPu_data = (
(0.96862745098039216, 0.9882352941176471 , 0.99215686274509807),
(0.8784313725490196 , 0.92549019607843142, 0.95686274509803926),
(0.74901960784313726, 0.82745098039215681, 0.90196078431372551),
(0.61960784313725492, 0.73725490196078436, 0.85490196078431369),
(0.5490196078431373 , 0.58823529411764708, 0.77647058823529413),
(0.5490196078431373 , 0.41960784313725491, 0.69411764705882351),
(0.53333333333333333, 0.25490196078431371, 0.61568627450980395),
(0.50588235294117645, 0.05882352941176471, 0.48627450980392156),
(0.30196078431372547, 0.0 , 0.29411764705882354)
)
_GnBu_data = (
(0.96862745098039216, 0.9882352941176471 , 0.94117647058823528),
(0.8784313725490196 , 0.95294117647058818, 0.85882352941176465),
(0.8 , 0.92156862745098034, 0.77254901960784317),
(0.6588235294117647 , 0.8666666666666667 , 0.70980392156862748),
(0.4823529411764706 , 0.8 , 0.7686274509803922 ),
(0.30588235294117649, 0.70196078431372544, 0.82745098039215681),
(0.16862745098039217, 0.5490196078431373 , 0.74509803921568629),
(0.03137254901960784, 0.40784313725490196, 0.67450980392156867),
(0.03137254901960784, 0.25098039215686274, 0.50588235294117645)
)
_Greens_data = (
(0.96862745098039216, 0.9882352941176471 , 0.96078431372549022),
(0.89803921568627454, 0.96078431372549022, 0.8784313725490196 ),
(0.7803921568627451 , 0.9137254901960784 , 0.75294117647058822),
(0.63137254901960782, 0.85098039215686272, 0.60784313725490191),
(0.45490196078431372, 0.7686274509803922 , 0.46274509803921571),
(0.25490196078431371, 0.6705882352941176 , 0.36470588235294116),
(0.13725490196078433, 0.54509803921568623, 0.27058823529411763),
(0.0 , 0.42745098039215684, 0.17254901960784313),
(0.0 , 0.26666666666666666, 0.10588235294117647)
)
_Greys_data = (
(1.0 , 1.0 , 1.0 ),
(0.94117647058823528, 0.94117647058823528, 0.94117647058823528),
(0.85098039215686272, 0.85098039215686272, 0.85098039215686272),
(0.74117647058823533, 0.74117647058823533, 0.74117647058823533),
(0.58823529411764708, 0.58823529411764708, 0.58823529411764708),
(0.45098039215686275, 0.45098039215686275, 0.45098039215686275),
(0.32156862745098042, 0.32156862745098042, 0.32156862745098042),
(0.14509803921568629, 0.14509803921568629, 0.14509803921568629),
(0.0 , 0.0 , 0.0 )
)
_Oranges_data = (
(1.0 , 0.96078431372549022, 0.92156862745098034),
(0.99607843137254903, 0.90196078431372551, 0.80784313725490198),
(0.99215686274509807, 0.81568627450980391, 0.63529411764705879),
(0.99215686274509807, 0.68235294117647061, 0.41960784313725491),
(0.99215686274509807, 0.55294117647058827, 0.23529411764705882),
(0.94509803921568625, 0.41176470588235292, 0.07450980392156863),
(0.85098039215686272, 0.28235294117647058, 0.00392156862745098),
(0.65098039215686276, 0.21176470588235294, 0.01176470588235294),
(0.49803921568627452, 0.15294117647058825, 0.01568627450980392)
)
_OrRd_data = (
(1.0 , 0.96862745098039216, 0.92549019607843142),
(0.99607843137254903, 0.90980392156862744, 0.78431372549019607),
(0.99215686274509807, 0.83137254901960789, 0.61960784313725492),
(0.99215686274509807, 0.73333333333333328, 0.51764705882352946),
(0.9882352941176471 , 0.55294117647058827, 0.34901960784313724),
(0.93725490196078431, 0.396078431372549 , 0.28235294117647058),
(0.84313725490196079, 0.18823529411764706, 0.12156862745098039),
(0.70196078431372544, 0.0 , 0.0 ),
(0.49803921568627452, 0.0 , 0.0 )
)
_PiYG_data = (
(0.55686274509803924, 0.00392156862745098, 0.32156862745098042),
(0.77254901960784317, 0.10588235294117647, 0.49019607843137253),
(0.87058823529411766, 0.46666666666666667, 0.68235294117647061),
(0.94509803921568625, 0.71372549019607845, 0.85490196078431369),
(0.99215686274509807, 0.8784313725490196 , 0.93725490196078431),
(0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
(0.90196078431372551, 0.96078431372549022, 0.81568627450980391),
(0.72156862745098038, 0.88235294117647056, 0.52549019607843139),
(0.49803921568627452, 0.73725490196078436, 0.25490196078431371),
(0.30196078431372547, 0.5725490196078431 , 0.12941176470588237),
(0.15294117647058825, 0.39215686274509803, 0.09803921568627451)
)
_PRGn_data = (
(0.25098039215686274, 0.0 , 0.29411764705882354),
(0.46274509803921571, 0.16470588235294117, 0.51372549019607838),
(0.6 , 0.4392156862745098 , 0.6705882352941176 ),
(0.76078431372549016, 0.6470588235294118 , 0.81176470588235294),
(0.90588235294117647, 0.83137254901960789, 0.90980392156862744),
(0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
(0.85098039215686272, 0.94117647058823528, 0.82745098039215681),
(0.65098039215686276, 0.85882352941176465, 0.62745098039215685),
(0.35294117647058826, 0.68235294117647061, 0.38039215686274508),
(0.10588235294117647, 0.47058823529411764, 0.21568627450980393),
(0.0 , 0.26666666666666666, 0.10588235294117647)
)
_PuBu_data = (
(1.0 , 0.96862745098039216, 0.98431372549019602),
(0.92549019607843142, 0.90588235294117647, 0.94901960784313721),
(0.81568627450980391, 0.81960784313725488, 0.90196078431372551),
(0.65098039215686276, 0.74117647058823533, 0.85882352941176465),
(0.45490196078431372, 0.66274509803921566, 0.81176470588235294),
(0.21176470588235294, 0.56470588235294117, 0.75294117647058822),
(0.0196078431372549 , 0.4392156862745098 , 0.69019607843137254),
(0.01568627450980392, 0.35294117647058826, 0.55294117647058827),
(0.00784313725490196, 0.2196078431372549 , 0.34509803921568627)
)
_PuBuGn_data = (
(1.0 , 0.96862745098039216, 0.98431372549019602),
(0.92549019607843142, 0.88627450980392153, 0.94117647058823528),
(0.81568627450980391, 0.81960784313725488, 0.90196078431372551),
(0.65098039215686276, 0.74117647058823533, 0.85882352941176465),
(0.40392156862745099, 0.66274509803921566, 0.81176470588235294),
(0.21176470588235294, 0.56470588235294117, 0.75294117647058822),
(0.00784313725490196, 0.50588235294117645, 0.54117647058823526),
(0.00392156862745098, 0.42352941176470588, 0.34901960784313724),
(0.00392156862745098, 0.27450980392156865, 0.21176470588235294)
)
_PuOr_data = (
(0.49803921568627452, 0.23137254901960785, 0.03137254901960784),
(0.70196078431372544, 0.34509803921568627, 0.02352941176470588),
(0.8784313725490196 , 0.50980392156862742, 0.07843137254901961),
(0.99215686274509807, 0.72156862745098038, 0.38823529411764707),
(0.99607843137254903, 0.8784313725490196 , 0.71372549019607845),
(0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
(0.84705882352941175, 0.85490196078431369, 0.92156862745098034),
(0.69803921568627447, 0.6705882352941176 , 0.82352941176470584),
(0.50196078431372548, 0.45098039215686275, 0.67450980392156867),
(0.32941176470588235, 0.15294117647058825, 0.53333333333333333),
(0.17647058823529413, 0.0 , 0.29411764705882354)
)
_PuRd_data = (
(0.96862745098039216, 0.95686274509803926, 0.97647058823529409),
(0.90588235294117647, 0.88235294117647056, 0.93725490196078431),
(0.83137254901960789, 0.72549019607843135, 0.85490196078431369),
(0.78823529411764703, 0.58039215686274515, 0.7803921568627451 ),
(0.87450980392156863, 0.396078431372549 , 0.69019607843137254),
(0.90588235294117647, 0.16078431372549021, 0.54117647058823526),
(0.80784313725490198, 0.07058823529411765, 0.33725490196078434),
(0.59607843137254901, 0.0 , 0.2627450980392157 ),
(0.40392156862745099, 0.0 , 0.12156862745098039)
)
_Purples_data = (
(0.9882352941176471 , 0.98431372549019602, 0.99215686274509807),
(0.93725490196078431, 0.92941176470588238, 0.96078431372549022),
(0.85490196078431369, 0.85490196078431369, 0.92156862745098034),
(0.73725490196078436, 0.74117647058823533, 0.86274509803921573),
(0.61960784313725492, 0.60392156862745094, 0.78431372549019607),
(0.50196078431372548, 0.49019607843137253, 0.72941176470588232),
(0.41568627450980394, 0.31764705882352939, 0.63921568627450975),
(0.32941176470588235, 0.15294117647058825, 0.5607843137254902 ),
(0.24705882352941178, 0.0 , 0.49019607843137253)
)
_RdBu_data = (
(0.40392156862745099, 0.0 , 0.12156862745098039),
(0.69803921568627447, 0.09411764705882353, 0.16862745098039217),
(0.83921568627450982, 0.37647058823529411, 0.30196078431372547),
(0.95686274509803926, 0.6470588235294118 , 0.50980392156862742),
(0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ),
(0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
(0.81960784313725488, 0.89803921568627454, 0.94117647058823528),
(0.5725490196078431 , 0.77254901960784317, 0.87058823529411766),
(0.2627450980392157 , 0.57647058823529407, 0.76470588235294112),
(0.12941176470588237, 0.4 , 0.67450980392156867),
(0.0196078431372549 , 0.18823529411764706, 0.38039215686274508)
)
_RdGy_data = (
(0.40392156862745099, 0.0 , 0.12156862745098039),
(0.69803921568627447, 0.09411764705882353, 0.16862745098039217),
(0.83921568627450982, 0.37647058823529411, 0.30196078431372547),
(0.95686274509803926, 0.6470588235294118 , 0.50980392156862742),
(0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ),
(1.0 , 1.0 , 1.0 ),
(0.8784313725490196 , 0.8784313725490196 , 0.8784313725490196 ),
(0.72941176470588232, 0.72941176470588232, 0.72941176470588232),
(0.52941176470588236, 0.52941176470588236, 0.52941176470588236),
(0.30196078431372547, 0.30196078431372547, 0.30196078431372547),
(0.10196078431372549, 0.10196078431372549, 0.10196078431372549)
)
_RdPu_data = (
(1.0 , 0.96862745098039216, 0.95294117647058818),
(0.99215686274509807, 0.8784313725490196 , 0.86666666666666667),
(0.9882352941176471 , 0.77254901960784317, 0.75294117647058822),
(0.98039215686274506, 0.62352941176470589, 0.70980392156862748),
(0.96862745098039216, 0.40784313725490196, 0.63137254901960782),
(0.86666666666666667, 0.20392156862745098, 0.59215686274509804),
(0.68235294117647061, 0.00392156862745098, 0.49411764705882355),
(0.47843137254901963, 0.00392156862745098, 0.46666666666666667),
(0.28627450980392155, 0.0 , 0.41568627450980394)
)
_RdYlBu_data = (
(0.6470588235294118 , 0.0 , 0.14901960784313725),
(0.84313725490196079, 0.18823529411764706 , 0.15294117647058825),
(0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
(0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
(0.99607843137254903, 0.8784313725490196 , 0.56470588235294117),
(1.0 , 1.0 , 0.74901960784313726),
(0.8784313725490196 , 0.95294117647058818 , 0.97254901960784312),
(0.6705882352941176 , 0.85098039215686272 , 0.9137254901960784 ),
(0.45490196078431372, 0.67843137254901964 , 0.81960784313725488),
(0.27058823529411763, 0.45882352941176469 , 0.70588235294117652),
(0.19215686274509805, 0.21176470588235294 , 0.58431372549019611)
)
_RdYlGn_data = (
(0.6470588235294118 , 0.0 , 0.14901960784313725),
(0.84313725490196079, 0.18823529411764706 , 0.15294117647058825),
(0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
(0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
(0.99607843137254903, 0.8784313725490196 , 0.54509803921568623),
(1.0 , 1.0 , 0.74901960784313726),
(0.85098039215686272, 0.93725490196078431 , 0.54509803921568623),
(0.65098039215686276, 0.85098039215686272 , 0.41568627450980394),
(0.4 , 0.74117647058823533 , 0.38823529411764707),
(0.10196078431372549, 0.59607843137254901 , 0.31372549019607843),
(0.0 , 0.40784313725490196 , 0.21568627450980393)
)
_Reds_data = (
(1.0 , 0.96078431372549022 , 0.94117647058823528),
(0.99607843137254903, 0.8784313725490196 , 0.82352941176470584),
(0.9882352941176471 , 0.73333333333333328 , 0.63137254901960782),
(0.9882352941176471 , 0.5725490196078431 , 0.44705882352941179),
(0.98431372549019602, 0.41568627450980394 , 0.29019607843137257),
(0.93725490196078431, 0.23137254901960785 , 0.17254901960784313),
(0.79607843137254897, 0.094117647058823528, 0.11372549019607843),
(0.6470588235294118 , 0.058823529411764705, 0.08235294117647058),
(0.40392156862745099, 0.0 , 0.05098039215686274)
)
_Spectral_data = (
(0.61960784313725492, 0.003921568627450980, 0.25882352941176473),
(0.83529411764705885, 0.24313725490196078 , 0.30980392156862746),
(0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ),
(0.99215686274509807, 0.68235294117647061 , 0.38039215686274508),
(0.99607843137254903, 0.8784313725490196 , 0.54509803921568623),
(1.0 , 1.0 , 0.74901960784313726),
(0.90196078431372551, 0.96078431372549022 , 0.59607843137254901),
(0.6705882352941176 , 0.8666666666666667 , 0.64313725490196083),
(0.4 , 0.76078431372549016 , 0.6470588235294118 ),
(0.19607843137254902, 0.53333333333333333 , 0.74117647058823533),
(0.36862745098039218, 0.30980392156862746 , 0.63529411764705879)
)
_YlGn_data = (
(1.0 , 1.0 , 0.89803921568627454),
(0.96862745098039216, 0.9882352941176471 , 0.72549019607843135),
(0.85098039215686272, 0.94117647058823528 , 0.63921568627450975),
(0.67843137254901964, 0.8666666666666667 , 0.55686274509803924),
(0.47058823529411764, 0.77647058823529413 , 0.47450980392156861),
(0.25490196078431371, 0.6705882352941176 , 0.36470588235294116),
(0.13725490196078433, 0.51764705882352946 , 0.2627450980392157 ),
(0.0 , 0.40784313725490196 , 0.21568627450980393),
(0.0 , 0.27058823529411763 , 0.16078431372549021)
)
_YlGnBu_data = (
(1.0 , 1.0 , 0.85098039215686272),
(0.92941176470588238, 0.97254901960784312 , 0.69411764705882351),
(0.7803921568627451 , 0.9137254901960784 , 0.70588235294117652),
(0.49803921568627452, 0.80392156862745101 , 0.73333333333333328),
(0.25490196078431371, 0.71372549019607845 , 0.7686274509803922 ),
(0.11372549019607843, 0.56862745098039214 , 0.75294117647058822),
(0.13333333333333333, 0.36862745098039218 , 0.6588235294117647 ),
(0.14509803921568629, 0.20392156862745098 , 0.58039215686274515),
(0.03137254901960784, 0.11372549019607843 , 0.34509803921568627)
)
_YlOrBr_data = (
(1.0 , 1.0 , 0.89803921568627454),
(1.0 , 0.96862745098039216 , 0.73725490196078436),
(0.99607843137254903, 0.8901960784313725 , 0.56862745098039214),
(0.99607843137254903, 0.7686274509803922 , 0.30980392156862746),
(0.99607843137254903, 0.6 , 0.16078431372549021),
(0.92549019607843142, 0.4392156862745098 , 0.07843137254901961),
(0.8 , 0.29803921568627451 , 0.00784313725490196),
(0.6 , 0.20392156862745098 , 0.01568627450980392),
(0.4 , 0.14509803921568629 , 0.02352941176470588)
)
_YlOrRd_data = (
(1.0 , 1.0 , 0.8 ),
(1.0 , 0.92941176470588238 , 0.62745098039215685),
(0.99607843137254903, 0.85098039215686272 , 0.46274509803921571),
(0.99607843137254903, 0.69803921568627447 , 0.29803921568627451),
(0.99215686274509807, 0.55294117647058827 , 0.23529411764705882),
(0.9882352941176471 , 0.30588235294117649 , 0.16470588235294117),
(0.8901960784313725 , 0.10196078431372549 , 0.10980392156862745),
(0.74117647058823533, 0.0 , 0.14901960784313725),
(0.50196078431372548, 0.0 , 0.14901960784313725)
)
# ColorBrewer's qualitative maps, implemented using ListedColormap
# for use with mpl.colors.NoNorm
_Accent_data = (
(0.49803921568627452, 0.78823529411764703, 0.49803921568627452),
(0.74509803921568629, 0.68235294117647061, 0.83137254901960789),
(0.99215686274509807, 0.75294117647058822, 0.52549019607843139),
(1.0, 1.0, 0.6 ),
(0.2196078431372549, 0.42352941176470588, 0.69019607843137254),
(0.94117647058823528, 0.00784313725490196, 0.49803921568627452),
(0.74901960784313726, 0.35686274509803922, 0.09019607843137254),
(0.4, 0.4, 0.4 ),
)
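# Illustrative sketch: each table in this qualitative block is just a sequence
# of RGB tuples, so it can be wrapped directly in a ListedColormap (the call
# below is an example only, not the registration code matplotlib itself uses):
#
# >>> from matplotlib.colors import ListedColormap
# >>> accent = ListedColormap(_Accent_data, name='Accent')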
_Dark2_data = (
(0.10588235294117647, 0.61960784313725492, 0.46666666666666667),
(0.85098039215686272, 0.37254901960784315, 0.00784313725490196),
(0.45882352941176469, 0.4392156862745098, 0.70196078431372544),
(0.90588235294117647, 0.16078431372549021, 0.54117647058823526),
(0.4, 0.65098039215686276, 0.11764705882352941),
(0.90196078431372551, 0.6705882352941176, 0.00784313725490196),
(0.65098039215686276, 0.46274509803921571, 0.11372549019607843),
(0.4, 0.4, 0.4 ),
)
_Paired_data = (
(0.65098039215686276, 0.80784313725490198, 0.8901960784313725 ),
(0.12156862745098039, 0.47058823529411764, 0.70588235294117652),
(0.69803921568627447, 0.87450980392156863, 0.54117647058823526),
(0.2, 0.62745098039215685, 0.17254901960784313),
(0.98431372549019602, 0.60392156862745094, 0.6 ),
(0.8901960784313725, 0.10196078431372549, 0.10980392156862745),
(0.99215686274509807, 0.74901960784313726, 0.43529411764705883),
(1.0, 0.49803921568627452, 0.0 ),
(0.792156862745098, 0.69803921568627447, 0.83921568627450982),
(0.41568627450980394, 0.23921568627450981, 0.60392156862745094),
(1.0, 1.0, 0.6 ),
(0.69411764705882351, 0.34901960784313724, 0.15686274509803921),
)
_Pastel1_data = (
(0.98431372549019602, 0.70588235294117652, 0.68235294117647061),
(0.70196078431372544, 0.80392156862745101, 0.8901960784313725 ),
(0.8, 0.92156862745098034, 0.77254901960784317),
(0.87058823529411766, 0.79607843137254897, 0.89411764705882357),
(0.99607843137254903, 0.85098039215686272, 0.65098039215686276),
(1.0, 1.0, 0.8 ),
(0.89803921568627454, 0.84705882352941175, 0.74117647058823533),
(0.99215686274509807, 0.85490196078431369, 0.92549019607843142),
(0.94901960784313721, 0.94901960784313721, 0.94901960784313721),
)
_Pastel2_data = (
(0.70196078431372544, 0.88627450980392153, 0.80392156862745101),
(0.99215686274509807, 0.80392156862745101, 0.67450980392156867),
(0.79607843137254897, 0.83529411764705885, 0.90980392156862744),
(0.95686274509803926, 0.792156862745098, 0.89411764705882357),
(0.90196078431372551, 0.96078431372549022, 0.78823529411764703),
(1.0, 0.94901960784313721, 0.68235294117647061),
(0.94509803921568625, 0.88627450980392153, 0.8 ),
(0.8, 0.8, 0.8 ),
)
_Set1_data = (
(0.89411764705882357, 0.10196078431372549, 0.10980392156862745),
(0.21568627450980393, 0.49411764705882355, 0.72156862745098038),
(0.30196078431372547, 0.68627450980392157, 0.29019607843137257),
(0.59607843137254901, 0.30588235294117649, 0.63921568627450975),
(1.0, 0.49803921568627452, 0.0 ),
(1.0, 1.0, 0.2 ),
(0.65098039215686276, 0.33725490196078434, 0.15686274509803921),
(0.96862745098039216, 0.50588235294117645, 0.74901960784313726),
(0.6, 0.6, 0.6),
)
_Set2_data = (
(0.4, 0.76078431372549016, 0.6470588235294118 ),
(0.9882352941176471, 0.55294117647058827, 0.3843137254901961 ),
(0.55294117647058827, 0.62745098039215685, 0.79607843137254897),
(0.90588235294117647, 0.54117647058823526, 0.76470588235294112),
(0.65098039215686276, 0.84705882352941175, 0.32941176470588235),
(1.0, 0.85098039215686272, 0.18431372549019609),
(0.89803921568627454, 0.7686274509803922, 0.58039215686274515),
(0.70196078431372544, 0.70196078431372544, 0.70196078431372544),
)
_Set3_data = (
(0.55294117647058827, 0.82745098039215681, 0.7803921568627451 ),
(1.0, 1.0, 0.70196078431372544),
(0.74509803921568629, 0.72941176470588232, 0.85490196078431369),
(0.98431372549019602, 0.50196078431372548, 0.44705882352941179),
(0.50196078431372548, 0.69411764705882351, 0.82745098039215681),
(0.99215686274509807, 0.70588235294117652, 0.3843137254901961 ),
(0.70196078431372544, 0.87058823529411766, 0.41176470588235292),
(0.9882352941176471, 0.80392156862745101, 0.89803921568627454),
(0.85098039215686272, 0.85098039215686272, 0.85098039215686272),
(0.73725490196078436, 0.50196078431372548, 0.74117647058823533),
(0.8, 0.92156862745098034, 0.77254901960784317),
(1.0, 0.92941176470588238, 0.43529411764705883),
)
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
#
# Most palette functions have been reduced to simple function descriptions
# by Reinier Heeres, since the rgb components were mostly straight lines.
# gist_earth_data and gist_ncar_data were simplified by a script and some
# manual effort.
_gist_earth_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.2824, 0.1882, 0.1882),
(0.4588, 0.2714, 0.2714),
(0.5490, 0.4719, 0.4719),
(0.6980, 0.7176, 0.7176),
(0.7882, 0.7553, 0.7553),
(1.0000, 0.9922, 0.9922),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0275, 0.0000, 0.0000),
(0.1098, 0.1893, 0.1893),
(0.1647, 0.3035, 0.3035),
(0.2078, 0.3841, 0.3841),
(0.2824, 0.5020, 0.5020),
(0.5216, 0.6397, 0.6397),
(0.6980, 0.7171, 0.7171),
(0.7882, 0.6392, 0.6392),
(0.7922, 0.6413, 0.6413),
(0.8000, 0.6447, 0.6447),
(0.8078, 0.6481, 0.6481),
(0.8157, 0.6549, 0.6549),
(0.8667, 0.6991, 0.6991),
(0.8745, 0.7103, 0.7103),
(0.8824, 0.7216, 0.7216),
(0.8902, 0.7323, 0.7323),
(0.8980, 0.7430, 0.7430),
(0.9412, 0.8275, 0.8275),
(0.9569, 0.8635, 0.8635),
(0.9647, 0.8816, 0.8816),
(0.9961, 0.9733, 0.9733),
(1.0000, 0.9843, 0.9843),
), 'blue': (
(0.0, 0.0, 0.0000),
(0.0039, 0.1684, 0.1684),
(0.0078, 0.2212, 0.2212),
(0.0275, 0.4329, 0.4329),
(0.0314, 0.4549, 0.4549),
(0.2824, 0.5004, 0.5004),
(0.4667, 0.2748, 0.2748),
(0.5451, 0.3205, 0.3205),
(0.7843, 0.3961, 0.3961),
(0.8941, 0.6651, 0.6651),
(1.0000, 0.9843, 0.9843),
)}
_gist_gray_data = {
'red': gfunc[3],
'green': gfunc[3],
'blue': gfunc[3],
}
_gist_heat_data = {
'red': lambda x: 1.5 * x,
'green': lambda x: 2 * x - 1,
'blue': lambda x: 4 * x - 3,
}
_gist_ncar_data = \
{'red': (
(0.0, 0.0, 0.0000),
(0.3098, 0.0000, 0.0000),
(0.3725, 0.3993, 0.3993),
(0.4235, 0.5003, 0.5003),
(0.5333, 1.0000, 1.0000),
(0.7922, 1.0000, 1.0000),
(0.8471, 0.6218, 0.6218),
(0.8980, 0.9235, 0.9235),
(1.0000, 0.9961, 0.9961),
), 'green': (
(0.0, 0.0, 0.0000),
(0.0510, 0.3722, 0.3722),
(0.1059, 0.0000, 0.0000),
(0.1569, 0.7202, 0.7202),
(0.1608, 0.7537, 0.7537),
(0.1647, 0.7752, 0.7752),
(0.2157, 1.0000, 1.0000),
(0.2588, 0.9804, 0.9804),
(0.2706, 0.9804, 0.9804),
(0.3176, 1.0000, 1.0000),
(0.3686, 0.8081, 0.8081),
(0.4275, 1.0000, 1.0000),
(0.5216, 1.0000, 1.0000),
(0.6314, 0.7292, 0.7292),
(0.6863, 0.2796, 0.2796),
(0.7451, 0.0000, 0.0000),
(0.7922, 0.0000, 0.0000),
(0.8431, 0.1753, 0.1753),
(0.8980, 0.5000, 0.5000),
(1.0000, 0.9725, 0.9725),
), 'blue': (
(0.0, 0.5020, 0.5020),
(0.0510, 0.0222, 0.0222),
(0.1098, 1.0000, 1.0000),
(0.2039, 1.0000, 1.0000),
(0.2627, 0.6145, 0.6145),
(0.3216, 0.0000, 0.0000),
(0.4157, 0.0000, 0.0000),
(0.4745, 0.2342, 0.2342),
(0.5333, 0.0000, 0.0000),
(0.5804, 0.0000, 0.0000),
(0.6314, 0.0549, 0.0549),
(0.6902, 0.0000, 0.0000),
(0.7373, 0.0000, 0.0000),
(0.7922, 0.9738, 0.9738),
(0.8000, 1.0000, 1.0000),
(0.8431, 1.0000, 1.0000),
(0.8980, 0.9341, 0.9341),
(1.0000, 0.9961, 0.9961),
)}
_gist_rainbow_data = (
(0.000, (1.00, 0.00, 0.16)),
(0.030, (1.00, 0.00, 0.00)),
(0.215, (1.00, 1.00, 0.00)),
(0.400, (0.00, 1.00, 0.00)),
(0.586, (0.00, 1.00, 1.00)),
(0.770, (0.00, 0.00, 1.00)),
(0.954, (1.00, 0.00, 1.00)),
(1.000, (1.00, 0.00, 0.75))
)
_gist_stern_data = {
'red': (
(0.000, 0.000, 0.000), (0.0547, 1.000, 1.000),
(0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250),
(1.000, 1.000, 1.000)),
'green': ((0, 0, 0), (1, 1, 1)),
'blue': (
(0.000, 0.000, 0.000), (0.500, 1.000, 1.000),
(0.735, 0.000, 0.000), (1.000, 1.000, 1.000))
}
_gist_yarg_data = {
'red': lambda x: 1 - x,
'green': lambda x: 1 - x,
'blue': lambda x: 1 - x,
}
# This bipolar color map was generated from CoolWarmFloat33.csv of
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland.
# <http://www.kennethmoreland.com/color-maps/>
_coolwarm_data = {
'red': [
(0.0, 0.2298057, 0.2298057),
(0.03125, 0.26623388, 0.26623388),
(0.0625, 0.30386891, 0.30386891),
(0.09375, 0.342804478, 0.342804478),
(0.125, 0.38301334, 0.38301334),
(0.15625, 0.424369608, 0.424369608),
(0.1875, 0.46666708, 0.46666708),
(0.21875, 0.509635204, 0.509635204),
(0.25, 0.552953156, 0.552953156),
(0.28125, 0.596262162, 0.596262162),
(0.3125, 0.639176211, 0.639176211),
(0.34375, 0.681291281, 0.681291281),
(0.375, 0.722193294, 0.722193294),
(0.40625, 0.761464949, 0.761464949),
(0.4375, 0.798691636, 0.798691636),
(0.46875, 0.833466556, 0.833466556),
(0.5, 0.865395197, 0.865395197),
(0.53125, 0.897787179, 0.897787179),
(0.5625, 0.924127593, 0.924127593),
(0.59375, 0.944468518, 0.944468518),
(0.625, 0.958852946, 0.958852946),
(0.65625, 0.96732803, 0.96732803),
(0.6875, 0.969954137, 0.969954137),
(0.71875, 0.966811177, 0.966811177),
(0.75, 0.958003065, 0.958003065),
(0.78125, 0.943660866, 0.943660866),
(0.8125, 0.923944917, 0.923944917),
(0.84375, 0.89904617, 0.89904617),
(0.875, 0.869186849, 0.869186849),
(0.90625, 0.834620542, 0.834620542),
(0.9375, 0.795631745, 0.795631745),
(0.96875, 0.752534934, 0.752534934),
(1.0, 0.705673158, 0.705673158)],
'green': [
(0.0, 0.298717966, 0.298717966),
(0.03125, 0.353094838, 0.353094838),
(0.0625, 0.406535296, 0.406535296),
(0.09375, 0.458757618, 0.458757618),
(0.125, 0.50941904, 0.50941904),
(0.15625, 0.558148092, 0.558148092),
(0.1875, 0.604562568, 0.604562568),
(0.21875, 0.648280772, 0.648280772),
(0.25, 0.688929332, 0.688929332),
(0.28125, 0.726149107, 0.726149107),
(0.3125, 0.759599947, 0.759599947),
(0.34375, 0.788964712, 0.788964712),
(0.375, 0.813952739, 0.813952739),
(0.40625, 0.834302879, 0.834302879),
(0.4375, 0.849786142, 0.849786142),
(0.46875, 0.860207984, 0.860207984),
(0.5, 0.86541021, 0.86541021),
(0.53125, 0.848937047, 0.848937047),
(0.5625, 0.827384882, 0.827384882),
(0.59375, 0.800927443, 0.800927443),
(0.625, 0.769767752, 0.769767752),
(0.65625, 0.734132809, 0.734132809),
(0.6875, 0.694266682, 0.694266682),
(0.71875, 0.650421156, 0.650421156),
(0.75, 0.602842431, 0.602842431),
(0.78125, 0.551750968, 0.551750968),
(0.8125, 0.49730856, 0.49730856),
(0.84375, 0.439559467, 0.439559467),
(0.875, 0.378313092, 0.378313092),
(0.90625, 0.312874446, 0.312874446),
(0.9375, 0.24128379, 0.24128379),
(0.96875, 0.157246067, 0.157246067),
(1.0, 0.01555616, 0.01555616)],
'blue': [
(0.0, 0.753683153, 0.753683153),
(0.03125, 0.801466763, 0.801466763),
(0.0625, 0.84495867, 0.84495867),
(0.09375, 0.883725899, 0.883725899),
(0.125, 0.917387822, 0.917387822),
(0.15625, 0.945619588, 0.945619588),
(0.1875, 0.968154911, 0.968154911),
(0.21875, 0.98478814, 0.98478814),
(0.25, 0.995375608, 0.995375608),
(0.28125, 0.999836203, 0.999836203),
(0.3125, 0.998151185, 0.998151185),
(0.34375, 0.990363227, 0.990363227),
(0.375, 0.976574709, 0.976574709),
(0.40625, 0.956945269, 0.956945269),
(0.4375, 0.931688648, 0.931688648),
(0.46875, 0.901068838, 0.901068838),
(0.5, 0.865395561, 0.865395561),
(0.53125, 0.820880546, 0.820880546),
(0.5625, 0.774508472, 0.774508472),
(0.59375, 0.726736146, 0.726736146),
(0.625, 0.678007945, 0.678007945),
(0.65625, 0.628751763, 0.628751763),
(0.6875, 0.579375448, 0.579375448),
(0.71875, 0.530263762, 0.530263762),
(0.75, 0.481775914, 0.481775914),
(0.78125, 0.434243684, 0.434243684),
(0.8125, 0.387970225, 0.387970225),
(0.84375, 0.343229596, 0.343229596),
(0.875, 0.300267182, 0.300267182),
(0.90625, 0.259301199, 0.259301199),
(0.9375, 0.220525627, 0.220525627),
(0.96875, 0.184115123, 0.184115123),
(1.0, 0.150232812, 0.150232812)]
}
# Implementation of Carey Rappaport's CMRmap.
# See `A Color Map for Effective Black-and-White Rendering of Color-Scale
# Images' by Carey Rappaport
# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m
_CMRmap_data = {'red': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.30, 0.30),
(0.375, 0.60, 0.60),
(0.500, 1.00, 1.00),
(0.625, 0.90, 0.90),
(0.750, 0.90, 0.90),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'green': ((0.000, 0.00, 0.00),
(0.125, 0.15, 0.15),
(0.250, 0.15, 0.15),
(0.375, 0.20, 0.20),
(0.500, 0.25, 0.25),
(0.625, 0.50, 0.50),
(0.750, 0.75, 0.75),
(0.875, 0.90, 0.90),
(1.000, 1.00, 1.00)),
'blue': ((0.000, 0.00, 0.00),
(0.125, 0.50, 0.50),
(0.250, 0.75, 0.75),
(0.375, 0.50, 0.50),
(0.500, 0.15, 0.15),
(0.625, 0.00, 0.00),
(0.750, 0.10, 0.10),
(0.875, 0.50, 0.50),
(1.000, 1.00, 1.00))}
# An MIT licensed, colorblind-friendly heatmap from Wistia:
# https://github.com/wistia/heatmap-palette
# http://wistia.com/blog/heatmaps-for-colorblindness
#
# >>> import matplotlib.colors as c
# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"]
# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors)
# >>> _wistia_data = cm._segmentdata
# >>> del _wistia_data['alpha']
#
_wistia_data = {
'red': [(0.0, 0.8941176470588236, 0.8941176470588236),
(0.25, 1.0, 1.0),
(0.5, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.0, 0.9882352941176471, 0.9882352941176471)],
'green': [(0.0, 1.0, 1.0),
(0.25, 0.9098039215686274, 0.9098039215686274),
(0.5, 0.7411764705882353, 0.7411764705882353),
(0.75, 0.6274509803921569, 0.6274509803921569),
(1.0, 0.4980392156862745, 0.4980392156862745)],
'blue': [(0.0, 0.47843137254901963, 0.47843137254901963),
(0.25, 0.10196078431372549, 0.10196078431372549),
(0.5, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.0, 0.0, 0.0)],
}
# Categorical palettes from Vega:
# https://github.com/vega/vega/wiki/Scales
# (divided by 255)
#
_Vega10_data = (
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4
(1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e
(0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728
(0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd
(0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b
(0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2
(0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f
(0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22
(0.09019607843137255, 0.7450980392156863, 0.8117647058823529), # 17becf
)
_Vega20_data = (
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4
(0.6823529411764706, 0.7803921568627451, 0.9098039215686274 ), # aec7e8
(1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e
(1.0, 0.7333333333333333, 0.47058823529411764 ), # ffbb78
(0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c
(0.596078431372549, 0.8745098039215686, 0.5411764705882353 ), # 98df8a
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728
(1.0, 0.596078431372549, 0.5882352941176471 ), # ff9896
(0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd
(0.7725490196078432, 0.6901960784313725, 0.8352941176470589 ), # c5b0d5
(0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b
(0.7686274509803922, 0.611764705882353, 0.5803921568627451 ), # c49c94
(0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2
(0.9686274509803922, 0.7137254901960784, 0.8235294117647058 ), # f7b6d2
(0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f
(0.7803921568627451, 0.7803921568627451, 0.7803921568627451 ), # c7c7c7
(0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22
(0.8588235294117647, 0.8588235294117647, 0.5529411764705883 ), # dbdb8d
(0.09019607843137255, 0.7450980392156863, 0.8117647058823529 ), # 17becf
(0.6196078431372549, 0.8549019607843137, 0.8980392156862745), # 9edae5
)
_Vega20b_data = (
(0.2235294117647059, 0.23137254901960785, 0.4745098039215686 ), # 393b79
(0.3215686274509804, 0.32941176470588235, 0.6392156862745098 ), # 5254a3
(0.4196078431372549, 0.43137254901960786, 0.8117647058823529 ), # 6b6ecf
(0.611764705882353, 0.6196078431372549, 0.8705882352941177 ), # 9c9ede
(0.38823529411764707, 0.4745098039215686, 0.2235294117647059 ), # 637939
(0.5490196078431373, 0.6352941176470588, 0.3215686274509804 ), # 8ca252
(0.7098039215686275, 0.8117647058823529, 0.4196078431372549 ), # b5cf6b
(0.807843137254902, 0.8588235294117647, 0.611764705882353 ), # cedb9c
(0.5490196078431373, 0.42745098039215684, 0.19215686274509805), # 8c6d31
(0.7411764705882353, 0.6196078431372549, 0.2235294117647059 ), # bd9e39
(0.9058823529411765, 0.7294117647058823, 0.3215686274509804 ), # e7ba52
(0.9058823529411765, 0.796078431372549, 0.5803921568627451 ), # e7cb94
(0.5176470588235295, 0.23529411764705882, 0.2235294117647059 ), # 843c39
(0.6784313725490196, 0.28627450980392155, 0.2901960784313726 ), # ad494a
(0.8392156862745098, 0.3803921568627451, 0.4196078431372549 ), # d6616b
(0.9058823529411765, 0.5882352941176471, 0.611764705882353 ), # e7969c
(0.4823529411764706, 0.2549019607843137, 0.45098039215686275), # 7b4173
(0.6470588235294118, 0.3176470588235294, 0.5803921568627451 ), # a55194
(0.807843137254902, 0.42745098039215684, 0.7411764705882353 ), # ce6dbd
(0.8705882352941177, 0.6196078431372549, 0.8392156862745098 ), # de9ed6
)
_Vega20c_data = (
(0.19215686274509805, 0.5098039215686274, 0.7411764705882353 ), # 3182bd
(0.4196078431372549, 0.6823529411764706, 0.8392156862745098 ), # 6baed6
(0.6196078431372549, 0.792156862745098, 0.8823529411764706 ), # 9ecae1
(0.7764705882352941, 0.8588235294117647, 0.9372549019607843 ), # c6dbef
(0.9019607843137255, 0.3333333333333333, 0.050980392156862744), # e6550d
(0.9921568627450981, 0.5529411764705883, 0.23529411764705882 ), # fd8d3c
(0.9921568627450981, 0.6823529411764706, 0.4196078431372549 ), # fdae6b
(0.9921568627450981, 0.8156862745098039, 0.6352941176470588 ), # fdd0a2
(0.19215686274509805, 0.6392156862745098, 0.32941176470588235 ), # 31a354
(0.4549019607843137, 0.7686274509803922, 0.4627450980392157 ), # 74c476
(0.6313725490196078, 0.8509803921568627, 0.6078431372549019 ), # a1d99b
(0.7803921568627451, 0.9137254901960784, 0.7529411764705882 ), # c7e9c0
(0.4588235294117647, 0.4196078431372549, 0.6941176470588235 ), # 756bb1
(0.6196078431372549, 0.6039215686274509, 0.7843137254901961 ), # 9e9ac8
(0.7372549019607844, 0.7411764705882353, 0.8627450980392157 ), # bcbddc
(0.8549019607843137, 0.8549019607843137, 0.9215686274509803 ), # dadaeb
(0.38823529411764707, 0.38823529411764707, 0.38823529411764707 ), # 636363
(0.5882352941176471, 0.5882352941176471, 0.5882352941176471 ), # 969696
(0.7411764705882353, 0.7411764705882353, 0.7411764705882353 ), # bdbdbd
(0.8509803921568627, 0.8509803921568627, 0.8509803921568627 ), # d9d9d9
)
class _deprecation_datad(dict):
"""
This class only exists for the purpose of raising an appropriate warning
    for the deprecation of spectral. It should be removed in 2.2, once the
colormap spectral disappears.
"""
def __getitem__(self, key):
if key in ["spectral", "spectral_r"]:
warn_deprecated(
"2.0",
name="spectral and spectral_r",
alternative="nipy_spectral and nipy_spectral_r",
obj_type="colormap"
)
return super(_deprecation_datad, self).__getitem__(key)
datad = _deprecation_datad({
'afmhot': _afmhot_data,
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'bwr': _bwr_data,
'brg': _brg_data,
'CMRmap': _CMRmap_data,
'cool': _cool_data,
'copper': _copper_data,
'cubehelix': _cubehelix_data,
'flag': _flag_data,
'gnuplot': _gnuplot_data,
'gnuplot2': _gnuplot2_data,
'gray': _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet': _jet_data,
'ocean': _ocean_data,
'pink': _pink_data,
'prism': _prism_data,
'rainbow': _rainbow_data,
'seismic': _seismic_data,
'spring': _spring_data,
'summer': _summer_data,
'terrain': _terrain_data,
'winter': _winter_data,
'nipy_spectral': _nipy_spectral_data,
'spectral': _nipy_spectral_data, # alias for backward compatibility
})
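# Illustrative sketch of the deprecation path above: looking up the legacy
# name emits the warning but still returns the same mapping as 'nipy_spectral'.
#
# >>> import warnings
# >>> with warnings.catch_warnings(record=True) as caught:
# ...     warnings.simplefilter("always")
# ...     datad['spectral'] is datad['nipy_spectral']
# True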
datad['Blues'] = _Blues_data
datad['BrBG'] = _BrBG_data
datad['BuGn'] = _BuGn_data
datad['BuPu'] = _BuPu_data
datad['GnBu'] = _GnBu_data
datad['Greens'] = _Greens_data
datad['Greys'] = _Greys_data
datad['Oranges'] = _Oranges_data
datad['OrRd'] = _OrRd_data
datad['PiYG'] = _PiYG_data
datad['PRGn'] = _PRGn_data
datad['PuBu'] = _PuBu_data
datad['PuBuGn'] = _PuBuGn_data
datad['PuOr'] = _PuOr_data
datad['PuRd'] = _PuRd_data
datad['Purples'] = _Purples_data
datad['RdBu'] = _RdBu_data
datad['RdGy'] = _RdGy_data
datad['RdPu'] = _RdPu_data
datad['RdYlBu'] = _RdYlBu_data
datad['RdYlGn'] = _RdYlGn_data
datad['Reds'] = _Reds_data
datad['Spectral'] = _Spectral_data
datad['YlGn'] = _YlGn_data
datad['YlGnBu'] = _YlGnBu_data
datad['YlOrBr'] = _YlOrBr_data
datad['YlOrRd'] = _YlOrRd_data
datad['gist_earth'] = _gist_earth_data
datad['gist_gray'] = _gist_gray_data
datad['gist_heat'] = _gist_heat_data
datad['gist_ncar'] = _gist_ncar_data
datad['gist_rainbow'] = _gist_rainbow_data
datad['gist_stern'] = _gist_stern_data
datad['gist_yarg'] = _gist_yarg_data
datad['coolwarm'] = _coolwarm_data
datad['Wistia'] = _wistia_data
# Qualitative
datad['Accent'] = {'listed': _Accent_data}
datad['Dark2'] = {'listed': _Dark2_data}
datad['Paired'] = {'listed': _Paired_data}
datad['Pastel1'] = {'listed': _Pastel1_data}
datad['Pastel2'] = {'listed': _Pastel2_data}
datad['Set1'] = {'listed': _Set1_data}
datad['Set2'] = {'listed': _Set2_data}
datad['Set3'] = {'listed': _Set3_data}
datad['Vega10'] = {'listed': _Vega10_data}
datad['Vega20'] = {'listed': _Vega20_data}
datad['Vega20b'] = {'listed': _Vega20b_data}
datad['Vega20c'] = {'listed': _Vega20c_data}
| gpl-3.0 |
olologin/scikit-learn | sklearn/utils/fixes.py | 7 | 13511 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
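    # Worked check of the identity used above: at x = 0 both
    # 1 / (1 + exp(-x)) and (1 + tanh(x / 2)) / 2 give 0.5, and for large |x|
    # the tanh form saturates smoothly instead of overflowing in exp(-x),
    # which is why the fallback is written this way.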
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
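# Usage sketch (the surrounding call site is an assumption, not taken from this
# module): under Python 2 a bound method such as ``est.fit`` cannot be pickled,
# so joblib workers are handed this module-level helper instead, e.g.
#
# >>> from sklearn.externals.joblib import Parallel, delayed
# >>> results = Parallel(n_jobs=2)(
# ...     delayed(parallel_helper)(est, 'fit', X, y) for est in estimators)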
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
| bsd-3-clause |
raghavrv/scikit-learn | examples/manifold/plot_swissroll.py | 72 | 1295 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
institution/fpp | main.py | 1 | 9816 | import sys
from parse_svg import accept_mm, accept_path, accept_viewBox, make_path
from path import distance, Vec, Bezier1, project, Poly, Line
from path import intersect_poly_poly, intersect_poly_line, flattern_bezier_list
import xml.etree.ElementTree as ET
from log import fail, info, warning
from reader import Reader
import math
VERSION = '0.4.0'
TOLERANCE_MM = 0.1
STEP_MM = 0.5
SHOW_GUI = 0
PRINT_OUTPUT = 0
LINE_THICKNESS_MM = 0.18
if SHOW_GUI:
import matplotlib.pyplot as plt
"""
Note on units: every variable stores value in [u] (unless postfix _mm), use mm_to and to_mm for input, output
"""
"""
TODO: top siatka
TODO: print ruler with values to output ?
TODO: set STEP size show output ^
# TODO: add scale 1cm x 1cm box to output ?
# TODO: thinner line in output <- set to mm ?
# TODO: add cover start indicator?
# TODO: Alert on negative values on the profil -- check h value in calc_value
# TODO: think of some sanity check on output? ->
check last point == first point if applicable
rectangular test case
"""
#width="{width}"
#height="{height}"
OUTPUT_TEMPLATE = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
viewBox="{viewbox}"
id="svg2"
version="1.1">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<path
id="{ident}"
d="{path}"
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:{line_thickness_mm}mm;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;image-rendering:auto" />
</svg>
"""
def get_aabb(ps):
min_x = +2**30
min_y = +2**30
max_x = -2**30
max_y = -2**30
for x,y in ps:
if x > max_x:
max_x = x
if x < min_x:
min_x = x
if y > max_y:
max_y = y
if y < min_y:
min_y = y
return Vec(min_x,min_y),Vec(max_x,max_y)
def write_shape_to_svg(oname, ident, points, viewbox, to_mm):
"""
oname -- svg filename
points -- path points
viewbox -- (x,y,dx,dy)
to_mm -- conversion ratio
"""
x,y,dx,dy = viewbox
path = "M " + " ".join("{:.6f},{:.6f}".format(*p) for p in points)
with open(oname, 'wb') as f:
f.write(
OUTPUT_TEMPLATE.format(
path = path,
width = "{:.6f}mm".format(dx * to_mm),
height = "{:.6f}mm".format(dy * to_mm),
viewbox = "{:.6f} {:.6f} {:.6f} {:.6f}".format(*viewbox),
ident = ident,
line_thickness_mm = LINE_THICKNESS_MM,
).encode('utf-8')
)
info("written to {}".format(oname))
def save_top_svg(ps, oname, mar, to_mm):
assert len(ps) > 0
a,b = get_aabb(ps)
d = b - a
vb = (a[0]-mar,a[1]-mar,d[0]+2*mar,d[1]+2*mar)
write_shape_to_svg(oname = oname, ident='top', points = ps, viewbox = vb, to_mm = to_mm)
def save_side_svg(ps, oname, mar, to_mm):
assert len(ps) > 0
max_y = 0
for x,y in ps:
if y > max_y:
max_y = y
max_x = ps[-1][0]
min_x = ps[ 0][0]
w = max_x - min_x
h = max_y
ps.append((max_x,0))
ps.append((min_x,0))
ps.append(ps[0]) # close
vb = (min_x-mar,0-mar,w+2*mar,h+2*mar)
write_shape_to_svg(oname = oname, ident='side', points = ps, viewbox = vb, to_mm = to_mm)
def get_conversion_mm(vb, w_mm, h_mm):
"""
return -- to_mm, mm_to
"""
x0,y0,dx,dy = vb
to_mmx = w_mm/dx
to_mmy = h_mm/dy
assert math.isclose(to_mmx, to_mmy), (to_mmx, to_mmy)
to_mm = to_mmx
mm_to = dx/w_mm
return to_mm, mm_to
def read_poly_from_svg_path(root, name, tolerance):
x = root.find(".//*[@id='"+name+"']")
if x != None:
beziers = make_path(accept_path(Reader(x.get('d'))))
err, vertices = flattern_bezier_list(beziers, tolerance, name)
return Poly(vertices)
else:
return None
def show(point_obrys, point_profil, value_mm):
vis1, = Bezier1(point_obrys, point_profil).render(plt)
vis2 = plt.text(
x=point_profil[0] + 15,
y=point_profil[1],
s="{:.1f}mm".format(value_mm),
verticalalignment='center',
backgroundcolor='white',
#bbox=dict(facecolor='white', alpha=0.8),
#size=12
)
plt.show()
plt.pause(0.001)
#plt.waitforbuttonpress(timeout=-1)
vis1.remove()
vis2.remove()
def calc_value(pos, obrys, profil, odcinek, to_mm):
##TODO: Calculate sidewall and cover point
##TODO: (height, dist) -- sidewall height, distance along obrys from start point
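    # Geometry: take the current outline ('obrys') point, project it onto the
    # profile baseline ('odcinek'), and build a line orthogonal to the baseline
    # through that projection. Its intersection with the profile ('profil')
    # gives the sidewall height; the outline point's offset along the same
    # orthogonal line gives the cover (top-view) coordinate.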
odcinek_dir = odcinek.get_dir()
ort_odcinek = Vec(odcinek_dir[1], -odcinek_dir[0])
point_obrys = obrys.get_point(pos)
point_odcinek, _ = project(point = point_obrys, line = odcinek)
orto_line = Line(point_odcinek, point_odcinek + ort_odcinek)
orto_unit = orto_line.get_length()
cover_x,cover_y = None, None
ths = intersect_poly_line(poly = profil, line = orto_line)
#print(profil.xs, orto_line.p0, orto_line.p1)
#print(ths)
if len(ths) == 1:
t,h = ths[0]
point_profil = profil.get_point(t)
            # TODO: assert that all intersection heights (h) have the same sign
cover_x = t
else:
fail('ERROR: unique intersection point of profil and orto_line is undefined')
_, cover_h = project(point = point_obrys, line = orto_line)
value = distance(point_odcinek, point_profil)
if SHOW_GUI:
show(point_obrys, point_profil, value * to_mm)
return value, Vec(cover_x, cover_h * orto_unit)
import os.path
def main():
info("FPP version: {}".format(VERSION))
if len(sys.argv) < 4:
info("usage: fpp <input.svg> <label1> <label2> [label3] ...")
sys.exit(0)
iname = sys.argv[1]
a = sys.argv[2]
for x in sys.argv[3:]:
b = x
main_segment(iname, a, b)
a = b
def main_segment(iname, start_label, end_label):
name = os.path.splitext(iname)[0]
info("opening: {!r}".format(iname))
tree = ET.parse(iname)
root = tree.getroot()
vb = accept_viewBox(Reader(root.get('viewBox')))
w_mm = accept_mm(Reader(root.get('width')))
h_mm = accept_mm(Reader(root.get('height')))
to_mm, mm_to = get_conversion_mm(vb, w_mm, h_mm)
info("width : {:.1f}mm".format(w_mm))
info("height: {:.1f}mm".format(h_mm))
#info("scale: 1mm is {:.3f}".format(1*mm_to))
#info("scale: 1 is {:.3f}mm".format(1*to_mm))
tolerance = TOLERANCE_MM * mm_to
profil = read_poly_from_svg_path(root, 'profil', tolerance)
    if profil is None:
        fail("ERROR: no 'profil' path found in the drawing")
obrys = read_poly_from_svg_path(root, 'obrys', tolerance)
    if obrys is None:
        fail("ERROR: no 'obrys' path found in the drawing")
info("obrys : length {:.1f}mm divided into {} segments".format(obrys.get_length()*to_mm, obrys.size()))
info("profil: length {:.1f}mm divided into {} segments".format(profil.get_length()*to_mm, profil.size()))
info("tolerance: {}mm".format(TOLERANCE_MM))
info("step size: {}mm".format(STEP_MM))
pos = 0.0
cross_poczatek = read_poly_from_svg_path(root, start_label, tolerance)
if cross_poczatek != None:
ths = intersect_poly_poly(obrys, cross_poczatek)
if len(ths) != 1:
fail("ERROR: start point not set")
else:
t,_ = ths[0]
pos = t
info("start: at {:.1f}mm".format(pos * to_mm))
else:
fail("ERROR: start point not set")
if end_label == start_label:
end = pos
info("end: at the beggining")
else:
cross_koniec = read_poly_from_svg_path(root, end_label, tolerance)
if cross_koniec != None:
ths = intersect_poly_poly(obrys, cross_koniec)
if len(ths) != 1:
info("end: present but not set")
else:
t,_ = ths[0]
end = t
info("end: at {:.1f}mm".format(end * to_mm))
else:
fail("ERROR: end point not set")
if pos < end:
delta = end - pos
else:
delta = obrys.get_length() - (pos - end)
assert delta > 0
assert delta <= obrys.get_length()
odcinek = Line(
profil.get_point(0),
profil.get_point(profil.get_length()),
)
# setup view
if SHOW_GUI:
plt.ion()
plt.show()
#plt.axis([vb[0], vb[0]+vb[2], vb[1], vb[1]+vb[3]])
profil.render(plt)
obrys.render(plt)
odcinek.render(plt)
if cross_poczatek:
cross_poczatek.render(plt)
if cross_koniec:
cross_koniec.render(plt)
rs = []
rs_cover = []
info("output length: {:.1f}mm".format(delta*to_mm))
info("running now...")
last_progress = 0
step = STEP_MM * mm_to
total = 0.0
while total < delta:
# print("pos,end = {:.1f},{:.1f}".format(pos*to_mm,end*to_mm))
# print("total,delta = {:.1f},{:.1f}".format(total*to_mm,delta*to_mm))
value, cover_p = calc_value(pos, obrys, profil, odcinek, to_mm)
if PRINT_OUTPUT:
print("OUTPUT: {:6.1f} {:6.1f} [mm] {:6.1f} {:6.1f} [u]".format(total*to_mm, value*to_mm, pos, value))
progress = int((total/delta) * 100)
if progress % 20 == 0 and progress != last_progress:
info("{:5}% done...".format(progress))
last_progress = progress
rs.append( (total, value) )
rs_cover.append( cover_p )
pos += step
if pos > obrys.get_length():
pos -= obrys.get_length()
total += step
# value at the end
total = delta
pos = end
value, cover_p = calc_value(pos, obrys, profil, odcinek, to_mm)
if PRINT_OUTPUT:
print("OUTPUT: {:6.1f} {:6.1f} [mm] {:6.1f} {:6.1f} [u]".format(total*to_mm, value*to_mm, pos, value))
rs.append((total, value))
rs_cover.append( cover_p )
info("{} points generated".format(len(rs)))
save_side_svg(rs, "{}-{}-{}-side.svg".format(name,start_label,end_label), 10*mm_to, to_mm)
save_top_svg(rs_cover, "{}-{}-{}-top.svg".format(name,start_label,end_label), 10*mm_to, to_mm)
if __name__ == '__main__':
main()
| agpl-3.0 |
ROGUE-JCTD/vida | vida/firestation/views.py | 1 | 20378 | import json
import pandas as pd
import urllib
from django.views.generic import DetailView, ListView, TemplateView
from django.shortcuts import get_object_or_404
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.db.models import Max, Min, Count
from django.db.models.fields import FieldDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import IntegerField
from vida.vida_core.mixins import LoginRequiredMixin, CacheMixin
from vida.firestation.managers import Ntile, Case, When, Avg
from vida.firestation.models import FireStation, FireDepartment
from vida.usgs.models import StateorTerritoryHigh, CountyorEquivalent, IncorporatedPlace
class DISTScoreContextMixin(object):
@staticmethod
def add_dist_values_to_context():
context = {}
score_metrics = FireDepartment.objects.all().aggregate(Max('dist_model_score'), Min('dist_model_score'))
context['dist_max'] = score_metrics['dist_model_score__max']
context['dist_min'] = score_metrics['dist_model_score__min']
population_metrics = FireDepartment.objects.all().aggregate(Max('population'), Min('population'))
context['population_max'] = population_metrics['population__max'] or 0
context['population_min'] = population_metrics['population__min'] or 0
return context
class FeaturedDepartmentsMixin(object):
"""
Mixin to add featured departments to a request.
"""
@staticmethod
def get_featured_departments():
return FireDepartment.priority_departments.all()
class DepartmentDetailView(LoginRequiredMixin, CacheMixin, DISTScoreContextMixin, DetailView):
model = FireDepartment
template_name = 'firestation/department_detail.html'
page = 1
objects_per_page = 10
cache_timeout = 60 * 15
def get_context_data(self, **kwargs):
context = super(DepartmentDetailView, self).get_context_data(**kwargs)
page = self.request.GET.get('page')
paginator = Paginator(context['firedepartment'].firestation_set.all(), self.objects_per_page)
try:
stations = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
stations = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
stations = paginator.page(paginator.num_pages)
context['firestations'] = stations
# population stats provide summary statistics for fields within the current objects population class
context['population_stats'] = self.object.population_class_stats
population_quartiles = self.object.population_metrics_table
if population_quartiles:
# risk model fire count breaks for the bullet chart
vals = population_quartiles.objects.get_field_stats('residential_fires_avg_3_years', group_by='residential_fires_avg_3_years_quartile')
context['residential_fires_avg_3_years_breaks'] = [n['max'] for n in vals]
# size 2 or above fire breaks for the bullet chart
vals = population_quartiles.objects.get_field_stats('risk_model_size1_percent_size2_percent_sum', group_by='risk_model_size1_percent_size2_percent_sum_quartile')
context['risk_model_greater_than_size_2_breaks'] = [n['max'] for n in vals]
# deaths and injuries for the bullet chart
vals = population_quartiles.objects.get_field_stats('risk_model_deaths_injuries_sum', group_by='risk_model_deaths_injuries_sum_quartile')
context['risk_model_deaths_injuries_breaks'] = [n['max'] for n in vals]
# This should be a table with risk quartiles already identified
report_card_peers = population_quartiles.objects.all()
# this should be an object that has the current department quartile values
object_values = self.object.population_metrics_row
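            # The three annotations below rank this department's DIST score
            # against peers that share its risk quartile (residential fires,
            # size-2+ fires, deaths/injuries): an Ntile(4) partitioned by the
            # matching quartile column and ordered by dist_model_score.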
report_card_peers = report_card_peers.annotate(dist_model_residential_fires_quartile=Case(When(**{'dist_model_score__isnull': False, 'residential_fires_avg_3_years_quartile': object_values.residential_fires_avg_3_years_quartile, 'then': Ntile(4, output_field=IntegerField(), partition_by='dist_model_score is not null, residential_fires_avg_3_years_quartile', order_by='dist_model_score')}), output_field=IntegerField(), default=None))
report_card_peers = report_card_peers.annotate(dist_model_risk_model_greater_than_size_2_quartile=Case(When(**{'dist_model_score__isnull': False, 'risk_model_size1_percent_size2_percent_sum_quartile': object_values.risk_model_size1_percent_size2_percent_sum_quartile, 'then': Ntile(4, output_field=IntegerField(), partition_by='dist_model_score is not null, risk_model_size1_percent_size2_percent_sum_quartile', order_by='dist_model_score')}), output_field=IntegerField(), default=None))
report_card_peers = report_card_peers.annotate(dist_model_risk_model_deaths_injuries_quartile=Case(When(**{'dist_model_score__isnull': False, 'risk_model_deaths_injuries_sum_quartile': object_values.risk_model_deaths_injuries_sum_quartile, 'then': Ntile(4, output_field=IntegerField(), partition_by='dist_model_score is not null, risk_model_deaths_injuries_sum_quartile', order_by='dist_model_score')}), output_field=IntegerField(), default=None))
df = pd.DataFrame(list(report_card_peers.values('id',
'dist_model_score',
'dist_model_residential_fires_quartile',
'dist_model_risk_model_greater_than_size_2_quartile',
'dist_model_risk_model_deaths_injuries_quartile')))
context['dist_model_risk_model_greater_than_size_2_quartile_avg'] = df.dist_model_risk_model_greater_than_size_2_quartile.mean()
context['dist_model_risk_model_deaths_injuries_quartile_avg'] = df.dist_model_risk_model_deaths_injuries_quartile.mean()
context['dist_model_residential_fires_quartile_avg'] = df.dist_model_residential_fires_quartile.mean()
context['dist_model_risk_model_greater_than_size_2_quartile_breaks'] = df.groupby(['dist_model_risk_model_greater_than_size_2_quartile']).max()['dist_model_score'].tolist()
context['dist_model_risk_model_deaths_injuries_quartile_breaks'] = df.groupby(['dist_model_risk_model_deaths_injuries_quartile']).max()['dist_model_score'].tolist()
context['dist_model_residential_fires_quartile_breaks'] = df.groupby(['dist_model_residential_fires_quartile']).max()['dist_model_score'].tolist()
context['dist_model_residential_fires_quartile'] = df.loc[df['id']==self.object.id].dist_model_residential_fires_quartile.values[0]
context['dist_model_risk_model_greater_than_size_2_quartile'] = df.loc[df['id']==self.object.id].dist_model_risk_model_greater_than_size_2_quartile.values[0]
context['dist_model_risk_model_deaths_injuries_quartile'] = df.loc[df['id']==self.object.id].dist_model_risk_model_deaths_injuries_quartile.values[0]
#national_risk_band
from django.db import connections
cursor = connections['default'].cursor()
query = FireDepartment.objects.filter(dist_model_score__isnull=False).as_quartiles().values('id', 'risk_model_size1_percent_size2_percent_sum_quartile', 'risk_model_deaths_injuries_sum_quartile').query.__str__()
qu ="""
WITH results as (
SELECT "firestation_firedepartment"."id",
"firestation_firedepartment"."dist_model_score",
CASE WHEN ("firestation_firedepartment"."risk_model_fires_size1_percentage" IS NOT NULL OR "firestation_firedepartment"."risk_model_fires_size2_percentage" IS NOT NULL) THEN ntile(4) over (partition by COALESCE(risk_model_fires_size1_percentage,0)+COALESCE(risk_model_fires_size2_percentage,0) != 0 order by COALESCE(risk_model_fires_size1_percentage,0)+COALESCE(risk_model_fires_size2_percentage,0)) ELSE NULL END AS "risk_model_size1_percent_size2_percent_sum_quartile", CASE WHEN ("firestation_firedepartment"."risk_model_deaths" IS NOT NULL OR "firestation_firedepartment"."risk_model_injuries" IS NOT NULL) THEN ntile(4) over (partition by COALESCE(risk_model_deaths,0)+COALESCE(risk_model_injuries,0) != 0 order by COALESCE(risk_model_deaths,0)+COALESCE(risk_model_injuries,0)) ELSE NULL END AS "risk_model_deaths_injuries_sum_quartile" FROM "firestation_firedepartment" WHERE "firestation_firedepartment"."dist_model_score" IS NOT NULL ORDER BY "firestation_firedepartment"."name" ASC
),
row as (
SELECT * from results where results.id={id}
)
select ntile_results.ntile
from
(select results.id, ntile(4) over (order by results.dist_model_score asc)
from results
inner join row on results.{field}=row.{field}) as ntile_results
where ntile_results.id={id};
"""
cursor.execute(qu.format(query=query.strip(), id=self.object.id, field='risk_model_size1_percent_size2_percent_sum_quartile'))
try:
context['national_risk_model_size1_percent_size2_percent_sum_quartile'] = cursor.fetchone()[0]
except (KeyError, TypeError):
context['national_risk_model_size1_percent_size2_percent_sum_quartile'] = None
cursor.execute(qu.format(query=query.strip(), id=self.object.id, field='risk_model_deaths_injuries_sum_quartile'))
try:
context['national_risk_model_deaths_injuries_sum_quartile'] = cursor.fetchone()[0]
except (KeyError, TypeError):
context['national_risk_model_deaths_injuries_sum_quartile'] = None
context.update(self.add_dist_values_to_context())
return context
class SafeSortMixin(object):
"""
Allow queryset sorting on explicit fields.
"""
# A list of tuples containing the order_by string and verbose name
sort_by_fields = []
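    # Typical (hypothetical) usage:
    #   class MyListView(SafeSortMixin, ListView):
    #       model = FireDepartment
    #       sort_by_fields = [('name', 'Name Ascending'), ('-name', 'Name Descending')]
    # The mixin then honours the ?sortBy=<field> GET parameter only for the
    # whitelisted fields above.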
def model_field_valid(self, field, choices=None):
"""
Ensures a model field is valid.
"""
if not field:
return False
if choices and field not in choices:
return False
if hasattr(self, 'model'):
try:
self.model._meta.get_field(field.replace('-', '', 1))
except FieldDoesNotExist:
return False
return True
def get_queryset(self):
"""
Runs the sortqueryset method on the current queryset.
"""
queryset = super(SafeSortMixin, self).get_queryset()
return self.sort_queryset(queryset, self.request.GET.get('sortBy'))
def sort_queryset(self, queryset, order_by):
"""
Sorts a queryset based after ensuring the provided field is valid.
"""
if self.model_field_valid(order_by, choices=[name for name, verbose_name in self.sort_by_fields]):
queryset = queryset.order_by(order_by)
if order_by == '-population':
queryset = queryset.extra(select={ 'population_is_null': 'population IS NULL'}) \
.order_by('population_is_null', '-population')
# default sorting to -population and put null values at the end.
else:
queryset = queryset.extra(select={'population_is_null': 'population IS NULL'}) \
.order_by('population_is_null', '-population')
return queryset
def get_sort_context(self, context):
"""
Adds sorting context to the context object.
"""
context['sort_by_fields'] = []
for field, verbose_name in self.sort_by_fields:
get_params = self.request.GET.copy()
get_params['sort_by'] = field
context['sort_by_fields'].append(dict(name=verbose_name, field=field))
context['sort_by_fields'] = json.dumps(context['sort_by_fields'])
return context
class LimitMixin(object):
limit_by_amounts = [15, 30, 60, 90]
def limit_queryset(self, limit):
"""
Limits the queryset.
"""
try:
limit = int(limit)
# make sure the limit is not 0
if limit:
self.paginate_by = limit
except:
return
    def get_queryset(self):
        """
        Applies the 'limit' GET parameter to the current queryset.
        """
        queryset = super(LimitMixin, self).get_queryset()
        self.limit_queryset(self.request.GET.get('limit'))
        return queryset
def get_limit_context(self, context):
"""
Adds sorting context to the context object.
"""
context['limit_by_amounts'] = []
get_params = self.request.GET.copy()
for limit in self.limit_by_amounts:
get_params['limit'] = limit
context['limit_by_amounts'].append((self.request.path + '?' + urllib.urlencode(get_params), limit))
return context
class FireDepartmentListView(LoginRequiredMixin, ListView, SafeSortMixin, LimitMixin, DISTScoreContextMixin,
FeaturedDepartmentsMixin):
model = FireDepartment
paginate_by = 30
queryset = FireDepartment.objects.all()
sort_by_fields = [
('name', 'Name Ascending'),
('-name', 'Name Descending'),
        ('state', 'State Ascending'),
('-state', 'State Descending'),
('dist_model_score', 'Lowest DIST Score'),
('-dist_model_score', 'Highest DIST Score'),
('population', 'Smallest Population'),
('-population', 'Largest Population')
]
search_fields = ['fdid', 'state', 'region', 'name']
range_fields = ['population', 'dist_model_score']
def get_queryset(self):
queryset = super(FireDepartmentListView, self).get_queryset()
# If there is a 'q' argument, this is a full text search.
if self.request.GET.get('q'):
queryset = queryset.full_text_search(self.request.GET.get('q'))
queryset = self.sort_queryset(queryset, self.request.GET.get('sortBy'))
self.limit_queryset(self.request.GET.get('limit'))
for field, value in self.request.GET.items():
if value and value.lower() != 'any' and field in self.search_fields:
if field.lower().endswith('name'):
field += '__icontains'
queryset = queryset.filter(**{field: value})
#range is passed as pair of comma delimited min and max values for example 12,36
try:
if field in self.range_fields and value and "," in value:
min, max = value.split(",")
Min = int(min)
Max = int(max)
if Min:
queryset = queryset.filter(**{field+'__gte': Min})
if Max:
from django.db.models import Q
queryset = queryset.filter(Q(**{field+'__lte': Max})|Q(**{field+'__isnull': True}))
except:
pass
return queryset
def get_context_data(self, **kwargs):
context = super(FireDepartmentListView, self).get_context_data(**kwargs)
context = self.get_sort_context(context)
context.update(self.add_dist_values_to_context())
context['featured_departments'] = self.get_featured_departments().order_by('?')[:5]
page_obj = context['page_obj']
paginator = page_obj.paginator
min_page = page_obj.number - 5
min_page = max(1, min_page)
max_page = page_obj.number + 6
max_page = min(paginator.num_pages, max_page)
context['windowed_range'] = range(min_page, max_page)
context['dist_min'] = 0
if min_page > 1:
context['first_page'] = 1
if max_page < paginator.num_pages:
context['last_page'] = paginator.num_pages
return context
class FireStationDetailView(DetailView):
model = FireStation
class SpatialIntersectView(ListView):
model = FireStation
template_name = 'firestation/department_detail.html'
context_object_name = 'firestations'
def get_queryset(self):
self.object = get_object_or_404(StateorTerritoryHigh, state_name__iexact=self.kwargs.get('state'))
return FireStation.objects.filter(geom__intersects=self.object.geom)
def get_context_data(self, **kwargs):
context = super(SpatialIntersectView, self).get_context_data(**kwargs)
context['object'] = self.object
return context
class SetDistrictView(DetailView):
model = CountyorEquivalent
template_name = 'firestation/set_department.html'
def get_context_data(self, **kwargs):
context = super(SetDistrictView, self).get_context_data(**kwargs)
context['stations'] = FireStation.objects.filter(geom__intersects=self.object.geom)
context['incorporated_places'] = IncorporatedPlace.objects.filter(geom__intersects=self.object.geom)
next_fs = FireStation.objects.filter(department__isnull=True, state='VA').order_by('?')
if next_fs:
context['next'] = CountyorEquivalent.objects.filter(geom__intersects=next_fs[0].geom)[0]
return context
def post(self, request, *args, **kwargs):
county = self.get_object()
try:
fd = FireDepartment.objects.get(content_type=ContentType.objects.get_for_model(CountyorEquivalent),
object_id=county.id)
except FireDepartment.DoesNotExist:
fd = FireDepartment.objects.create(name='{0} {1} Fire Department'.format(county.county_name, county.get_fcode_display()), content_object=county,
geom=county.geom)
fd.save()
FireStation.objects.filter(geom__intersects=county.geom).update(department=fd)
return HttpResponseRedirect(reverse('set_fire_district', args=[county.id]))
class Stats(LoginRequiredMixin, TemplateView):
template_name='firestation/firestation_stats.html'
def get_context_data(self, **kwargs):
context = super(Stats, self).get_context_data(**kwargs)
context['stations'] = FireStation.objects.all()
context['departments'] = FireDepartment.objects.all()
context['stations_with_fdid'] = FireStation.objects.filter(fdid__isnull=False)
context['stations_with_departments'] = FireStation.objects.filter(department__isnull=False)
context['departments_with_government_unit'] = FireDepartment.objects.filter(object_id__isnull=True)
return context
class Home(LoginRequiredMixin, TemplateView, FeaturedDepartmentsMixin):
template_name = 'firestation/home.html'
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
priority_department_geojson = cache.get('priority_deparments_geojson')
if not priority_department_geojson:
priority_departments = []
#for fd in FireDepartment.priority_departments.all():
#priority_departments.append(dict(type='Feature', geometry=json.loads(fd.headquarters_geom.centroid.json),
# properties=dict(dist_model_score=fd.dist_model_score,
# predicted_fires=fd.predicted_fires_sum,
# name=fd.name, url=fd.get_absolute_url()
# )))
context['featured_departments'] = json.dumps(dict(type='FeatureCollection', features=priority_departments))
cache.set('priority_deparments_geojson', context['featured_departments'], 60 * 60 * 24)
else:
context['featured_departments'] = priority_department_geojson
return context
| mit |
jaaamessszzz/BMI203-Homework3 | homework3/__main__.py | 1 | 3753 | #!/usr/bin/env python3
"""
Align sequences using a given substitution matrix
Usage:
homework3 align <substitution_matrix> <sequence_pairs> [options]
homework3 gaps
homework3 thresholds
homework3 compare [options]
homework3 optimize <matrix_to_optimize>
Arguments:
<substitution_matrix>
Name of the substitution matrix to use for the alignments
<sequence_pairs>
Name of file containing space delimited pairs of sequences to align
align
Run alignment with specified substitution matrix and sequence pairs
thresholds
Run routine to determine the 0.7 score threshold for each gap opening/
extension penalty combination
gaps
Run routine to determine optimal gap penalties for the BLOSUM50 matrix
using the previously determined threshold scores
compare
Compare substitution matrices in terms of false positive rate. Also
generate ROC curves
optimize
Run algorithm to optimize scoring matrix...
<matrix_to_optimize>
Name of the matrix to run optimization on
Options:
-n --normalize
Normalize the raw scores from the alignment by the length of the
shorter of the two sequences
-o --output <path>
Save alignment output to a file named <path>
-c --compare_optimized <matrix>
Compare 1) default matrix, 2) optimized scoring matrix against default
matrix alignments, and 3) optimized scoring matrix against optimized
alignments
"""
if __name__ == '__main__':
from .align import align
from .util import determine_thresholds, determine_gap_penalties, run_alignments, compare_matrices, compare_optimized, matrix_optimization
import docopt
import re
import collections
import os
import sys
import numpy as np
import pandas as pd
from Bio import SeqIO
import seaborn as sns
import matplotlib.pyplot as plt
args = docopt.docopt(__doc__)
seq_align = align()
# Set substitution matrix
if args['align']:
# Initialize variables and stuff
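        # The value stored for each matrix name is the row index passed to
        # pandas' ``header=`` below, i.e. how many comment lines precede the
        # actual substitution matrix in the corresponding file.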
substitution_matrices = {'BLOSUM50': 6,
'BLOSUM62': 6,
'BLOSUM62-Optimized': 0,
'MATIO': 2,
'MATIO-Optimized': 0,
'PAM100': 9,
'PAM100-Optimized': 0,
'PAM250': 9
}
seq_align.substitution_matrix = pd.read_table(open('./{}'.format(args['<substitution_matrix>'])),
delim_whitespace=True,
header=substitution_matrices[args['<substitution_matrix>']]
)
seq_align.substitution_matrix = seq_align.substitution_matrix.set_index(seq_align.substitution_matrix.columns.values)
seq_align.substitution_matrix.to_csv('{}.csv'.format(args['<substitution_matrix>']))
seq_align.working_pairs = open(args['<sequence_pairs>'])
run_alignments(seq_align, args['--output'])
if args['thresholds'] == True:
determine_thresholds()
if args['gaps'] == True:
determine_gap_penalties()
if args['compare'] == True:
if args['--normalize']:
normalize = True
else:
normalize = False
if args['--compare_optimized']:
compare_optimized(args['--compare_optimized'])
else:
compare_matrices(normalize)
if args['optimize'] == True:
        matrix_optimization(args['<matrix_to_optimize>'])
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/indexing/test_indexing_slow.py | 5 | 3698 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest
class TestIndexingSlow(object):
@pytest.mark.slow
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[:i + 1] not in mi.index
continue
assert key[:i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i + 1], axis=1, inplace=True)
right.set_index(cols[i + 1:-1], inplace=True)
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0],
index=['jolia'])
tm.assert_series_equal(mi.loc[key[:i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(
list('abcdefghij'), n), choice(
pd.date_range('20141009', periods=10).tolist(), n), choice(
list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(
list('abcdefghijk'), m), choice(
pd.date_range('20141009', periods=11).tolist(), m), choice(
list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(
by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
assert (not (10 ** 6, 0) in result)
| bsd-3-clause |
research-team/memristive-brain | mem_neuron_code/fb.py | 1 | 1789 | from scipy.signal import find_peaks
from datetime import datetime as dt
import matplotlib.pyplot as plt
from nptdms import TdmsFile
import numpy as np
import argparse
import fnmatch
import os
def file_finder(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
if __name__ == "__main__":
# find tdms files
TDMSPath = file_finder('*.tdms', 'Moscow_data')[0]
# open founded tdms file
with TdmsFile.open(TDMSPath) as file_tdms:
grp='input_pins'
channels=[i.name for i in file_tdms[grp].channels()]
channel=file_tdms[grp][channels[0]]
times=channel.time_track()
time_range_begin = 250
time_range_end = 252
time_index_range_begin = np.where(times >= time_range_begin)[0][0]
time_index_range_end = np.where(times <= time_range_end)[-1][-1]
time = times[time_index_range_begin : time_index_range_end]
fig = plt.figure(figsize=(300,70))
plt.rcParams.update({'font.size': 32})
mem1fb = np.array(file_tdms['input_pins']['mem1fb' ][time_index_range_begin : time_index_range_end])
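        # Detect spikes: keep only peaks at least 0.8 high (mem1fb units, volts
        # per the axis label below) and separated by at least 300 samples.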
peaks_fb, _ = find_peaks(mem1fb, distance = 300, height = 0.8)
#difer = np.diff(mem1fb[peaks_fb])
#print(difer)
plt.boxplot(mem1fb[peaks_fb])
#fig.add_subplot(5,1,1)
print(np.mean(mem1fb[peaks_fb]))
#plt.plot(time, mem1fb)
#plt.plot(time[peaks_fb], mem1fb[peaks_fb], 'o', color = 'red')
print()
#plt.ylabel("mem1fb (V)") # ось абсцисс
#plt.xlabel("Time (s)") # ось ординат
#plt.grid() # включение отображение сетки
plt.show()
#plt.savefig('delta_t/datas_{}_{}.pdf'.format(time_range_begin, time_range_end))
plt.close('all')
| mit |
samfu1994/cs838webpage | code/train_and_test.py | 1 | 7003 | #author: hwang
#version: 2.0,0
import csv
import numpy as np
import random
from sklearn import svm
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPClassifier
import itertools
import sys
import argparse
import pydotplus
from IPython.display import Image
TRAIN_RATIO = 0.8
TEST_RATIO = 0.2
def k_fold(k, dataset, label):
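    """
    Plain k-fold cross-validation: split `dataset`/`label` into k contiguous
    folds, train an SVM on the other k-1 folds, score on the held-out fold,
    and return the classifier from the best-scoring fold.
    """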
#initialize
classifier_list = []
sub_data_sets = []
sub_labels = []
CV_accuracy = []
index_interval = len(dataset) / k
iter_idx = 0
#split data set and labels to k sub_sets
for i in range(k):
classifier_list.append(svm.SVC())
sub_data_sets.append(dataset[iter_idx:iter_idx+index_interval])
sub_labels.append(label[iter_idx:iter_idx+index_interval])
iter_idx += index_interval
#begin k-fold validation
for i in range(k):
accuracy_counter = 0
tmp_train_set = []
tmp_train_label = []
#choose i-th sub_sets for CV set
#choose other sub_sets as training set
if i == 0:
tmp_train_set.extend(sub_data_sets[i+1:])
tmp_train_label.extend(sub_labels[i+1:])
tmp_train_set = list(itertools.chain.from_iterable(tmp_train_set))
tmp_train_label = list(itertools.chain.from_iterable(tmp_train_label))
else:
tmp_train_set.extend(sub_data_sets[0:i])
tmp_train_set.extend(sub_data_sets[i+1:])
tmp_train_label.extend(sub_labels[0:i])
tmp_train_label.extend(sub_labels[i+1:])
tmp_train_set = list(itertools.chain.from_iterable(tmp_train_set))
tmp_train_label = list(itertools.chain.from_iterable(tmp_train_label))
tmp_cv_set = sub_data_sets[i]
tmp_cv_label = sub_labels[i]
tmp_train_set = np.array(tmp_train_set)
tmp_train_label = np.array(tmp_train_label)
tmp_cv_set = np.array(tmp_cv_set)
tmp_cv_label = np.array(tmp_cv_label)
classifier_list[i].fit(tmp_train_set, tmp_train_label)
tmp_prediction = classifier_list[i].predict(tmp_cv_set)
#calc CV accuracy
for idx in range(len(tmp_prediction)):
if tmp_cv_label[idx] == tmp_prediction[idx]:
accuracy_counter += 1
        CV_accuracy.append(float(accuracy_counter) / len(tmp_cv_label))
max_accuracy = max(CV_accuracy)
max_idx = CV_accuracy.index(max_accuracy)
best_cv_classifier = classifier_list[max_idx]
return best_cv_classifier
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--modelname', help='The machine learning model name.', required=True)
args = vars(parser.parse_args())
model_name = args["modelname"]
train_data = []
test_data = []
#load and parse the csv file
with open('train_data.csv', 'r') as data_file:
spamreader = csv.reader(data_file)
for row in spamreader:
train_data.append(row[1:])
train_data = train_data[1:]
for instance in train_data:
instance[3] = int(instance[3])
with open('test_data.csv', 'r') as data_file:
spamreader = csv.reader(data_file)
for row in spamreader:
test_data.append(row[1:])
test_data = test_data[1:]
for instance in test_data:
instance[3] = int(instance[3])
#start train with SVM
if model_name == 'SVM':
clf = svm.SVC()
if model_name == 'decision_tree':
clf = tree.DecisionTreeClassifier()
if model_name == 'random_forest':
clf = RandomForestClassifier()
if model_name == 'logistic_regression':
clf = LogisticRegression()
if model_name == 'linear_regression' or model_name == 'neural_net':
if model_name == 'linear_regression':
clf = LinearRegression()
elif model_name == 'neural_net':
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
for instance in train_data:
for feature_idx in range(len(train_data[0])):
instance[feature_idx] = int(instance[feature_idx])
    # split the raw rows into feature vectors and labels for the training and test sets
train_set = []
train_label = []
test_set = []
test_label = []
for i in range(len(train_data)):
train_row = train_data[i]
train_set.append(train_row[0:-1])
train_label.append(train_row[-1])
for i in range(len(test_data)):
test_row = test_data[i]
test_set.append(test_row[0:-1])
test_label.append(test_row[-1])
train_set = np.array(train_set)
train_label = np.array(train_label)
test_set = np.array(test_set)
test_label = np.array(test_label)
clf.fit(train_set, train_label)
if model_name == "decision_tree":
#visualize the tree
university_feature_names = [
"has university",
"has state name",
"has state word",
"length",
"has dash",
"all_capital",
"has num"
]
university_target_name = ["True", "False"]
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=university_feature_names,
class_names=university_target_name,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_jpg("tree.jpg")
#make prediction
if model_name == 'logistic_regression' or model_name == 'linear_regression' or model_name == 'neural_net':
test_set = test_set.astype(np.float)
predict_result = clf.predict(test_set)
if model_name == 'linear_regression' or model_name == 'neural_net':
accuracy_counter = 0
tp = 0
fn = 0
fp = 0
for i in range(len(predict_result)):
predict = predict_result[i]
if predict >= 0.5:
predict_result[i] = 1
else:
predict_result[i] = 0
for idx in range(len(predict_result)):
if int(test_label[idx]) == predict_result[idx]:
accuracy_counter += 1
if predict_result[idx] == 1 and int(test_label[idx]) == 1:
tp += 1
if predict_result[idx] == 0 and int(test_label[idx]) == 1:
fn += 1
if predict_result[idx] == 1 and int(test_label[idx]) == 0:
fp += 1
accuracy = float(accuracy_counter) / len(test_label)
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
F1_score = 2 * (precision*recall) / float(precision + recall)
print("Test set accuracy: %s\tPrecision: %s\tRecall: %s\tF_1: %s" % (str(accuracy), str(precision), str(recall),
str(F1_score)))
#calc test set accuracy/ precision/ recall
#naive classifier only with training set
else:
accuracy_counter = 0
tp = 0
fn = 0
fp = 0
for idx in range(len(predict_result)):
if test_label[idx] == predict_result[idx]:
accuracy_counter += 1
if predict_result[idx] == '1' and test_label[idx] == '1':
tp += 1
if predict_result[idx] == '0' and test_label[idx] == '1':
fn += 1
if predict_result[idx] == '1' and test_label[idx] == '0':
fp += 1
accuracy = float(accuracy_counter) / len(test_label)
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
F1_score = 2 * (precision*recall) / float(precision + recall)
print("Test set accuracy: %s\tPrecision: %s\tRecall: %s\tF_1: %s" % (str(accuracy), str(precision), str(recall),
str(F1_score)))
| mit |
seanbell/intrinsic | bell2014/solver.py | 1 | 16160 | import timeit
import numpy as np
import sklearn
from skimage import morphology
from sklearn.cluster import MiniBatchKMeans
from .params import IntrinsicParameters
from .decomposition import IntrinsicDecomposition
from .energy import IntrinsicEnergy
from .optimization import minimize_l1, minimize_l2
from .krahenbuhl2013.krahenbuhl2013 import DenseCRF
class IntrinsicSolver(object):
def __init__(self, input, params):
""" Create a new solver with given input and parameters. Nothing
happens until you call ``solve``. """
if isinstance(params, dict):
params = IntrinsicParameters.from_dict(params)
self.params = params
self.input = input
self.energy = IntrinsicEnergy(self.input, params)
def solve(self):
""" Perform all steps. """
if self.params.logging:
t0 = timeit.default_timer()
print("solve...")
# Initialize
self.decomposition = IntrinsicDecomposition(self.params, self.input)
self.decomposition_history = []
self.initialize_intensities()
for i in xrange(self.params.n_iters):
if self.params.logging:
print("\nrun: starting iteration %s/%s" % (i, self.params.n_iters))
self.decomposition.iter_num = i
# STAGE 1
self.decomposition.stage_num = 1
self.stage1_optimize_r()
self.remove_unused_intensities()
self.decomposition_history.append(self.decomposition.copy())
if self.decomposition.intensities.shape[0] <= 1:
if self.params.logging:
print("Warning: only 1 reflectance -- exit early")
break
# STAGE 2
self.decomposition.stage_num = 2
if self.params.split_clusters and i == self.params.n_iters - 1:
self.split_label_clusters()
self.stage2_smooth_s()
self.decomposition_history.append(self.decomposition.copy())
# prepare final solution
r, s = self.decomposition.get_r_s()
if self.params.logging:
t1 = timeit.default_timer()
print("solve (%s s)" % (t1 - t0))
return r, s, self.decomposition
def prev_decomposition(self):
""" Return the previous decomposition (used to compute the blurred
shading target). """
if self.decomposition_history:
return self.decomposition_history[-1]
else:
return None
def initialize_intensities(self):
""" Initialization: k-means of the input image """
if self.params.logging:
t0 = timeit.default_timer()
print("initialization: k-means clustering with %s centers..." %
self.params.kmeans_n_clusters)
image_irg = self.input.image_irg
mask_nz = self.input.mask_nz
if self.params.fixed_seed:
# fix the seed when computing things like gradients across
# hyperparameters
random_state = np.random.RandomState(seed=59173)
else:
random_state = None
samples = image_irg[mask_nz[0], mask_nz[1], :]
if samples.shape[0] > self.params.kmeans_max_samples:
print("image is large: subsampling %s/%s random pixels" %
(self.params.kmeans_max_samples, samples.shape[0]))
samples = sklearn.utils \
.shuffle(samples)[:self.params.kmeans_max_samples, :]
samples[:, 0] *= self.params.kmeans_intensity_scale
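        # The intensity channel was just up-weighted relative to chromaticity
        # for clustering; the same scale is divided back out when the cluster
        # centers are read below.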
kmeans = MiniBatchKMeans(
n_clusters=self.params.kmeans_n_clusters,
compute_labels=False, random_state=random_state)
kmeans.fit(samples)
assert self.params.kmeans_intensity_scale > 0
self.decomposition.intensities = (
kmeans.cluster_centers_[:, 0] /
self.params.kmeans_intensity_scale
)
self.decomposition.chromaticities = (
kmeans.cluster_centers_[:, 1:3]
)
if self.params.logging:
t1 = timeit.default_timer()
print("clustering done (%s s). intensities:\n%s" %
(t1 - t0, self.decomposition.intensities))
def stage1_optimize_r(self):
""" Stage 1: dense CRF optimization """
if self.params.logging:
t0 = timeit.default_timer()
print("stage1_optimize_r: compute costs...")
nlabels = self.decomposition.intensities.shape[0]
npixels = self.input.mask_nnz
# use a Python wrapper around the code from [Krahenbuhl et al 2013]
densecrf = DenseCRF(npixels, nlabels)
# unary costs
unary_costs = self.energy.compute_unary_costs(
decomposition=self.decomposition,
prev_decomposition=self.prev_decomposition(),
)
densecrf.set_unary_energy(unary_costs)
# pairwise costs
if self.params.pairwise_weight:
pairwise_costs = self.energy.compute_pairwise_costs(
decomposition=self.decomposition,
)
densecrf.add_pairwise_energy(
pairwise_costs=(self.params.pairwise_weight * pairwise_costs).astype(np.float32),
features=self.energy.get_features().copy(),
)
if self.params.logging:
print("stage1_optimize_r: optimizing dense crf (%s iters)..." %
self.params.n_crf_iters)
t0crf = timeit.default_timer()
# maximum aposteriori labeling ("x" variable in the paper)
self.decomposition.labels_nz = densecrf.map(self.params.n_crf_iters)
if self.params.logging:
t1crf = timeit.default_timer()
print("stage1_optimize_r: dense crf done (%s s)" % (t1crf - t0crf))
if self.params.logging:
t1 = timeit.default_timer()
print("stage1_optimize_r: done (%s s)" % (t1 - t0))
def stage2_smooth_s(self):
""" Stage 2: L1 shading smoothness """
if self.params.logging:
t0 = timeit.default_timer()
print('stage2_smooth_s: constructing linear system...')
if self.params.stage2_maintain_median_intensity:
median_intensity = np.median(self.decomposition.intensities)
log_intensities = np.log(self.decomposition.intensities)
# the 'A' matrix (in Ax = b) is in CSR sparse format
A_data, A_rows, A_cols, A_shape, b = \
self.construct_shading_smoothness_system(log_intensities)
if len(b) < 1:
if self.params.logging:
                print('Warning: empty linear system (nlabels=%s)' %
                      self.decomposition.intensities.shape[0])
return
if self.params.logging:
print('solving linear system...')
# solve for the change to the variables, so that we can slightly
# regularize the variables to be near zero (i.e. near the previous
# value).
if self.params.stage2_norm == "L1":
minimize = minimize_l1
elif self.params.stage2_norm == "L2":
minimize = minimize_l2
else:
raise ValueError("Invalid stage2_norm: %s" % self.params.stage2_norm)
delta_intensities = minimize(
A_data, A_rows, A_cols, A_shape, b,
damp=1e-8, logging=self.params.logging,
)
intensities = np.exp(log_intensities + delta_intensities)
if self.params.stage2_maintain_median_intensity:
# Since there's a scale ambiguity and stage1 includes a term that
# depends on absolute shading, keep the median intensity constant.
# This is a pretty small adjustment.
intensities *= median_intensity / np.median(intensities)
self.decomposition.intensities = intensities
if self.params.logging:
t1 = timeit.default_timer()
print('stage2_smooth_s: done (%s s)' % (t1 - t0))
def construct_shading_smoothness_system(self, log_intensities):
""" Create a sparse matrix (CSR format) to minimize discontinuities in
the shading channel (by adjusting ``decomposition.intensities``).
:return: A_data, A_rows, A_cols, A_shape, b
"""
rows, cols = self.input.shape[0:2]
mask = self.input.mask
labels = self.decomposition.get_labels()
if self.params.stage2_chromaticity:
# labels represent RGB colors (but we are still only adjusting
# intensity)
log_image_rgb = self.input.log_image_rgb
log_reflectances_rgb = np.log(np.clip(self.decomposition.get_reflectances_rgb(), 1e-5, np.inf))
else:
# labels represent intensities
log_image_gray = self.input.log_image_gray
A_rows = []
A_cols = []
A_data = []
b = []
# Note that in the paper, params.shading_smooth_k = 1, i.e. only the
# immediate pixel neighbors are smoothed. This code is slightly more
# general in that it allows to smooth pixels k units away if you set
# shading_smooth_k > 1, weighted by 1/(k*k).
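        # Each smoothness row asks the post-update log-shadings of two
        # neighboring pixels p, q (labels l0, l1) to agree:
        #   delta_l0 - delta_l1 = log I_p - log I_q + log R_l1 - log R_l0
        # hence the +weight / -weight entries at columns l0, l1 and the
        # right-hand side accumulated into b below.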
for k in xrange(1, self.params.shading_smooth_k + 1):
weight = 1.0 / (k * k)
for i in xrange(rows - k):
for j in xrange(cols - k):
if not mask[i, j]:
continue
if mask[i + k, j]:
l0 = labels[i, j]
l1 = labels[i + k, j]
if l0 != l1:
if self.params.stage2_chromaticity:
# RGB interpretation
for c in xrange(3):
A_rows.append(len(b))
A_cols.append(l0)
A_data.append(weight)
A_rows.append(len(b))
A_cols.append(l1)
A_data.append(-weight)
bval = (log_image_rgb[i, j, c] -
log_image_rgb[i + k, j, c] +
log_reflectances_rgb[l1, c] -
log_reflectances_rgb[l0, c])
b.append(weight * bval)
else:
# intensity interpretation
A_rows.append(len(b))
A_cols.append(l0)
A_data.append(weight)
A_rows.append(len(b))
A_cols.append(l1)
A_data.append(-weight)
bval = (log_image_gray[i, j] -
log_image_gray[i + k, j] +
log_intensities[l1] -
log_intensities[l0])
b.append(weight * bval)
if mask[i, j + k]:
l0 = labels[i, j]
l1 = labels[i, j + k]
if l0 != l1:
if self.params.stage2_chromaticity:
# RGB interpretation
for c in xrange(3):
A_rows.append(len(b))
A_cols.append(l0)
A_data.append(weight)
A_rows.append(len(b))
A_cols.append(l1)
A_data.append(-weight)
bval = (log_image_rgb[i, j, c] -
log_image_rgb[i, j + k, c] +
log_reflectances_rgb[l1, c] -
log_reflectances_rgb[l0, c])
b.append(weight * bval)
else:
# intensity interpretation
A_rows.append(len(b))
A_cols.append(l0)
A_data.append(weight)
A_rows.append(len(b))
A_cols.append(l1)
A_data.append(-weight)
bval = (log_image_gray[i, j] -
log_image_gray[i, j + k] +
log_intensities[l1] -
log_intensities[l0])
b.append(weight * bval)
A_shape = (len(b), log_intensities.shape[0])
return (
np.array(A_data),
np.array(A_rows),
np.array(A_cols),
A_shape,
np.array(b, dtype=np.float)
)
def remove_unused_intensities(self):
""" Remove any intensities that are not currently assigned to a pixel,
and then re-number all labels so they are contiguous again. """
if self.params.logging:
prev_r_s = self.decomposition.get_r_s()
labels_nz = self.decomposition.labels_nz
intensities = self.decomposition.intensities
chromaticities = self.decomposition.chromaticities
nlabels = intensities.shape[0]
new_to_old = np.nonzero(np.bincount(
labels_nz, minlength=nlabels))[0]
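        # Example: if only labels {0, 3, 5} are still in use (nlabels = 6),
        # then new_to_old = [0, 3, 5] and old_to_new = [0, -1, -1, 1, -1, 2],
        # i.e. labels are renumbered to the contiguous range 0..2.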
old_to_new = np.empty(nlabels, dtype=np.int32)
old_to_new.fill(-1)
for new, old in enumerate(new_to_old):
old_to_new[old] = new
self.decomposition.labels_nz = old_to_new[labels_nz]
self.decomposition.intensities = intensities[new_to_old]
self.decomposition.chromaticities = chromaticities[new_to_old]
if self.params.logging:
print ('remove_unused_intensities: %s/%s labels kept' % (
len(self.decomposition.intensities), len(intensities)))
if self.params.logging:
np.testing.assert_equal(self.decomposition.get_r_s(), prev_r_s)
assert (self.decomposition.chromaticities.shape[0] ==
self.decomposition.intensities.shape[0])
def split_label_clusters(self, neighbors=4):
""" Expand the set of labels by looking at each connected component in
the labels. Assign each component a new label number, and copy its old
intensity value to its new label. This typically expands the number of
labels from ~30 to ~3000, so you should only really do it on the last
iteration. """
if self.params.logging:
prev_r_s = self.decomposition.get_r_s()
rows, cols = self.input.shape[0:2]
labels = self.decomposition.get_labels()
intensities = self.decomposition.intensities
chromaticities = self.decomposition.chromaticities
# split labels
new_labels = morphology.label(labels, neighbors=neighbors)
# map labels
self.decomposition.labels_nz = new_labels[self.input.mask_nz]
# map intensities
_, indices = np.unique(new_labels.ravel(), return_index=True)
new_to_old = labels.ravel()[indices]
new_to_old = new_to_old[new_to_old != -1]
self.decomposition.intensities = intensities[new_to_old]
self.decomposition.chromaticities = chromaticities[new_to_old]
if self.params.logging:
print ('split_label_clusters: %s --> %s' % (
intensities.shape[0], self.decomposition.intensities.shape[0]))
self.remove_unused_intensities()
if self.params.logging:
np.testing.assert_equal(self.decomposition.get_r_s(), prev_r_s)
assert (self.decomposition.chromaticities.shape[0] ==
self.decomposition.intensities.shape[0])
| mit |
ryadzenine/featkit | featkit/item_selector.py | 1 | 2841 | from sklearn.base import TransformerMixin, BaseEstimator
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class MultiItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at the provided keys.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[keys]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'c': [2, 5, 4, 3, 9, 7],
'b': [9, 4, 1, 4, 1, 3]}
    >> ds = MultiItemSelector(keys=['a', 'b'])
    >> data[['a', 'b']] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
    keys : list of hashable, required
        The keys corresponding to the desired values/columns in a mappable.
"""
def __init__(self, keys):
self.keys = keys
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.keys] # .apply(lambda x: {k: v for k, v in x.to_dict().iteritems()}, axis=0).as_matrix()
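# A minimal usage sketch (assuming a pandas DataFrame `df` with a 'text'
# column; the names here are illustrative only):
#
#   from sklearn.pipeline import Pipeline, FeatureUnion
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   union = FeatureUnion([
#       ('text', Pipeline([('select', ItemSelector('text')),
#                          ('tfidf', TfidfVectorizer())])),
#   ])
#   X = union.fit_transform(df)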
| mit |
mirjalil/IMDB-reviews | logistic/linear.py | 1 | 2069 | import os
from KaggleWord2VecUtility import KaggleWord2VecUtility
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
import pandas as pd
import numpy as np
train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, \
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", \
quoting=3 )
y = train["sentiment"]
print "Cleaning and parsing movie reviews...\n"
traindata = []
for i in xrange( 0, len(train["review"])):
traindata.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(train["review"][i], False)))
testdata = []
for i in xrange(0,len(test["review"])):
testdata.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(test["review"][i], False)))
print 'vectorizing... ',
tfv = TfidfVectorizer(min_df=5, max_features=None,
strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
stop_words = 'english')
X_all = traindata + testdata
lentrain = len(traindata)
print "fitting pipeline... ",
tfv.fit(X_all)
X_all = tfv.transform(X_all)
X = X_all[:lentrain]
X_test = X_all[lentrain:]
model = LogisticRegression(penalty='l2', dual=True, tol=0.0001,
C=1, fit_intercept=True, intercept_scaling=1.0,
class_weight=None, random_state=None)
print "20 Fold CV Score: ", np.mean(cross_validation.cross_val_score(model, X, y, cv=20, scoring='roc_auc'))
print "Retrain on all training data, predicting test labels...\n"
model.fit(X,y)
result = model.predict_proba(X_test)[:,1]
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv(os.path.join(os.path.dirname(__file__), 'logistic.tfidf.2.csv'), index=False, quoting=3)
print "Wrote results to logistic.tfidf.2.csv"
| gpl-2.0 |
asteca/ASteCA | packages/synth_clust/completeness_rm.py | 1 | 5237 |
import numpy as np
def main(isoch_binar, completeness):
"""
Remove a number of stars according to the percentages of star loss found in
the 'mag_completeness' function of the luminosity module, for the real
observation.
"""
# Remember that 'comp_perc' here means the percentage of stars that should
# be *REMOVED* from each mag range|bin.
bin_edges, comp_perc = completeness[:2]
# If stars exist in the isochrone beyond the completeness magnitude
# level, then apply the removal of stars. Otherwise, skip it.
if np.max(isoch_binar[0]) > bin_edges[0]:
# Indexes of stars in 'isoch_binar[0]' whose main magnitude
# value falls between the ranges given by 'bin_edges'.
#
# Magnitude values *below* the minimum magnitude edge will be
# assigned the integer '0'.
c_indx = np.searchsorted(bin_edges, isoch_binar[0], side='left')
# Equivalent to np.histogram(isoch_binar[0], bin_edges)[0]
count = np.bincount(c_indx, minlength=len(comp_perc))
# Round to integer and clip at '0' so there are no negative values.
di = np.rint(count * comp_perc).astype(int).clip(0)
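        # Illustrative example with made-up numbers (not from the original code):
        # if three magnitude bins hold count = [40, 25, 10] stars and
        # comp_perc = [0.1, 0.4, 0.8], then di = [4, 10, 8] stars are removed from
        # the respective bins by the loop below.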
# The stars are already shuffled in 'mass_interp', so this selection
# of the first 'd' elements is not removing a given type of star over
# any other.
d_i = []
for i, d in enumerate(di):
d_i.append(np.where(c_indx == i)[0][:d])
d_i = np.concatenate(d_i)
# # DEPRECATED 03/12/19 #445
# # The minimum length is that of the 'comp_perc' list plus one,
# # so after removing the '0' elements both lists will have the same
# # length.
# count = np.bincount(c_indx, minlength=len(comp_perc) + 1)[1:]
# di = np.rint(count * comp_perc).astype(int).clip(0)
# # Actual indexes of stars, stored in each edge range.
# rang_indx = idxFind(len(bin_edges), c_indx)
# # Pick a number (given by the list 'di') of random elements in
# # each range. Those are the indexes of the elements that
# # should be *removed* from the sub-lists.
# d_i = indxRem(di, rang_indx, cmpl_rnd)
# Remove stars pointed to by 'd_i' from *all* the sub-arrays in
# 'isoch_binar'.
isoch_compl = np.delete(isoch_binar, d_i, axis=1)
#
# import matplotlib.pyplot as plt
# plt.hist(isoch_binar[0], bins=bin_edges, histtype='step', label="orig")
# plt.hist(isoch_compl[0], bins=bin_edges, histtype='step', label="new")
# plt.hist(isoch_compl2[0], bins=bin_edges, histtype='step', ls=':',
# label="old")
# plt.legend()
# plt.show()
else:
isoch_compl = isoch_binar
return isoch_compl
# DEPRECATED 03/12/ #445
# def indxRem(di, rang_indx, cmpl_rnd):
# """
# Select a fixed number (given by 'di') of random indexes in 'rang_indx'.
# These correspond to the stars that will be removed in each magnitude
# range.
# Source: https://stackoverflow.com/a/46079837/1391441
# """
# lens = np.array([len(_) for _ in rang_indx])
# di0 = np.minimum(lens, di)
# invalid_mask = lens[:, None] <= np.arange(lens.max())
# # Create a 2D random array in interval [0,1) to cover the max length of
# # subarrays.
# rand_nums = np.copy(cmpl_rnd[:len(lens) * lens.max()].reshape(
# len(lens), lens.max()))
# # For each subarray, set the invalid places to 1.0. Get argsort for each
# # row. Those 1s corresponding to the invalid places would stay at the back
# # because there were no 1s in the original random array. Thus, we have the
# # indices array.
# rand_nums[invalid_mask] = 1
# # Slice each row of those indices array to the extent of the lengths
# # listed in di.
# shuffled_indx = np.argpartition(rand_nums, lens - 1, axis=1)
# # Start a loop and slice each subarray from 'rang_indx' using those sliced
# # indices.
# out = []
# for i, all_idx in enumerate(shuffled_indx):
# if lens[i] > 0:
# slice_idx = all_idx[:di0[i]]
# out += rang_indx[i][slice_idx].tolist()
# return np.asarray(out)
# def idxFind(N, c_indx):
# """
# Store the actual indexes of stars in the accepted edge ranges, stored in
# each corresponding range.
# """
# # Reject stars in the 0th position. These are stars below the value
# # where the completeness loss starts.
# mask = (c_indx > 0)
# # Keep those stars with indexes in the accepted magnitude range.
# c_mask = c_indx[mask]
# # Ordered indexes for the masked stars.
# indices = np.arange(c_indx.size)[mask]
# # Indexes that would sort 'c_mask'.
# sorting_idx = np.argsort(c_mask, kind='mergesort')
# # Keep only the ordered indexes that are associated with 'c_mask'.
# ind_sorted = indices[sorting_idx]
# # Indexes of ordered indexes (N) positioned into 'c_mask'.
# x = np.searchsorted(
# c_mask, range(N), side='right', sorter=sorting_idx)
# # Store star indices into each edge range.
# rang_indx = [ind_sorted[x[i]:x[i + 1]] for i in range(N - 1)]
# return rang_indx
| gpl-3.0 |
ky822/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
Vincentyao1995/Globalink2017-UBC | Vincent/before meeting DT/pattern_minerals_REE/proxy_depth.py | 1 | 4529 | import test_algorithm as ta
import SP_paras
import pandas as pd
import numpy as np
from glob import glob
import os
threshold_noise = 0.005
discard_paras = 1
#open an image, attention: Tranversing all the pics later.
bands = [[1350,1500]]
# attention: we could compute the proxy for every candidate band, then compute corrcoef(df_proxy, amount) to see which band is most related to the amount, and finally write out and filter the correlations automatically (see the commented sketch after cal_proxy_all_file below).
# attention: choose_band still needs updating and the multi-band case is not handled yet, so the DataFrame structure (and possibly the proxy calculation) is wrong for it; one option is to write results band by band and index parameters as para_dict['bandx']['AA'] after changing cal_proxy.
# This function takes the chosen band(s) and returns a DataFrame holding the proxies of all the images.
def cal_proxy_all_file(bands = bands):
filePath = 'data/'
files_list_oxi = glob(filePath + 'oxidos/'+"*.hdr")
name_list_oxi = [name for name in os.listdir(filePath + 'oxidos/') if name.endswith('.hdr')]
name_list_sulf = [name for name in os.listdir(filePath + 'sulfuros/') if name.endswith('.hdr')]
files_list_sulf = glob(filePath + 'sulfuros/'+'*.hdr')
num_oxi = len(files_list_oxi)
num_sulf = len(files_list_sulf)
df_proxy = None
for i in range(num_oxi):
image = ta.load_image(filePath = files_list_oxi[i])
proxy_dict = cal_proxy(image,bands = bands)
        # Initialize the DataFrame that stores all files' proxies.
if i ==0:
df_proxy = pd.DataFrame(np.zeros((len(name_list_oxi)+len(name_list_sulf), len(proxy_dict.keys()))),columns = proxy_dict.keys(),index = name_list_oxi + name_list_sulf)
        # Write the proxies to the DataFrame.
for key in proxy_dict:
df_proxy[key][name_list_oxi[i]] = proxy_dict[key]
# to show the progress
print('%s done!' % name_list_oxi[i])
for i in range(num_sulf):
image = ta.load_image(filePath = files_list_sulf[i])
proxy_dict = cal_proxy(image,bands = bands)
        # Write the proxies to the DataFrame.
for key in proxy_dict:
df_proxy[key][name_list_sulf[i]] = proxy_dict[key]
# to show the progress
print('%s done!' % name_list_sulf[i])
return df_proxy
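# Hedged sketch (not part of the original script) of the band-ranking idea noted
# above: 'amount' is assumed to be a pandas Series of measured grades indexed by
# file name, and candidate_bands are made-up wavelength windows.
#
#   candidate_bands = [[1350, 1500], [1900, 2100], [2150, 2250]]
#   band_scores = {}
#   for band in candidate_bands:
#       df_proxy = cal_proxy_all_file(bands=[band])
#       # correlate each proxy column with the known amounts
#       band_scores[tuple(band)] = df_proxy.corrwith(amount)
#   # bands with the largest absolute correlations are the most informative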
# This function takes an image and returns its proxies as a dict of the different proxy values (AA, SAI, etc.).
def cal_proxy(image,bands = bands):
global discard_paras
global threshold_noise
proxy_dict = {}
proxy_dict.setdefault('AA', 0.0)
proxy_dict.setdefault('AD', 0.0)
proxy_dict.setdefault('AW', 0.0)
proxy_dict.setdefault('AS', 0.0)
proxy_dict.setdefault('AP', 0.0)
proxy_dict.setdefault('SAI', 0.0)
width, height, deepth = image.shape
count_bg = 0
proxy_pop_mark = 0
for i in range(width):
for j in range(height):
sp_pixel = image[i,j]
#ignore the background pixel
if ta.exclude_BG(sp_pixel):
count_bg += 1
continue
para_dict = SP_paras.SP_paras(sp_pixel,wavelength = bands)
para_dict = SP_paras.dict_to_dataFrame(para_dict)
if discard_paras:
para_dict.pop('AA')
para_dict.pop('AP')
para_dict.pop('AW')
if proxy_pop_mark == 0:
proxy_dict.pop('AA')
proxy_dict.pop('AP')
proxy_dict.pop('AW')
proxy_pop_mark = 1
# sum paras in para_dict so that we get proxies.
for key in para_dict:
if para_dict[key] > threshold_noise:
proxy_dict[key] += para_dict[key]
# the number of rock pixels = total - background pixel
count_rocks = height* width - count_bg
#cal proxy, summation of all pixels' paras/ pixels' number
for key in proxy_dict:
proxy_dict[key] /= count_rocks
return proxy_dict
# This function accepts a file name string (fileOutName) and writes the proxies of all files to it.
def output_proxy(fileOutName = 'proxy_all_file.txt', bands = bands):
df_proxy = cal_proxy_all_file(bands = bands)
file_out = open('data/' + fileOutName,'w')
df_proxy.to_string(file_out)
file_out.write('\n\t\tbands: %d - %d\n' % (bands[0][0],bands[0][1]))
print('all done!\n')
#attention
def corrcoef():
pass | mit |
mantidproject/mantid | qt/applications/workbench/workbench/plotting/test/test_toolbar.py | 3 | 5354 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import unittest
from unittest.mock import patch
import matplotlib
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
from mantidqt.utils.qt.testing import start_qapplication
from workbench.plotting.figuremanager import MantidFigureCanvas, FigureManagerWorkbench
@start_qapplication
class ToolBarTest(unittest.TestCase):
"""
Test that the grids on/off toolbar button has the correct state when creating a plot in various different cases.
"""
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_unchecked_for_plot_with_no_grid(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(subplot_kw={'projection': 'mantid'})
axes.plot([-10, 10], [1, 2])
# Grid button should be OFF because we have not enabled the grid.
self.assertFalse(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_checked_for_plot_with_grid(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(subplot_kw={'projection': 'mantid'})
axes.plot([-10, 10], [1, 2])
axes.grid()
# Grid button should be ON because we enabled the grid.
self.assertTrue(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_checked_for_plot_with_grid_using_kwargs(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(subplot_kw={'projection': 'mantid'})
axes.plot([-10, 10], [1, 2])
# Set the grid on using kwargs in tick_params, like the plot script generator.
axes.tick_params(axis='x', which='major', **{'gridOn': True})
axes.tick_params(axis='y', which='major', **{'gridOn': True})
# Grid button should be ON because we enabled the grid on both axes.
self.assertTrue(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_unchecked_for_plot_with_only_x_grid_using_kwargs(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(subplot_kw={'projection': 'mantid'})
axes.plot([-10, 10], [1, 2])
# Set the grid on using kwargs in tick_params, like the plot script generator.
axes.tick_params(axis='x', which='major', **{'gridOn': True})
# Grid button should be OFF because we only enabled the grid on one axis.
self.assertFalse(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_unchecked_for_tiled_plot_with_no_grids(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(ncols=2, nrows=2, subplot_kw={'projection': 'mantid'})
for ax in fig.get_axes():
ax.plot([-10, 10], [1, 2])
# None of the subplots have grids, so grid button should be toggled OFF.
self.assertFalse(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_checked_for_tiled_plot_with_all_grids(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(ncols=2, nrows=2, subplot_kw={'projection': 'mantid'})
for ax in fig.get_axes():
ax.plot([-10, 10], [1, 2])
ax.grid()
# All subplots have grids, so button should be toggled ON.
self.assertTrue(self._is_grid_button_checked(fig))
@patch("workbench.plotting.figuremanager.QAppThreadCall")
def test_button_unchecked_for_tiled_plot_with_some_grids(self, mock_qappthread):
mock_qappthread.return_value = mock_qappthread
fig, axes = plt.subplots(ncols=2, nrows=2, subplot_kw={'projection': 'mantid'})
for ax in fig.get_axes():
ax.plot([-10, 10], [1, 2])
# Only show major grid on 3/4 of the subplots.
axes[0][0].grid()
axes[0][1].grid()
axes[1][0].grid()
# Grid button should be OFF because not all subplots have grids.
self.assertFalse(self._is_grid_button_checked(fig))
@classmethod
def _is_grid_button_checked(cls, fig):
"""
Create the figure manager and check whether its toolbar is toggled on or off for the given figure.
We have to explicity call set_button_visibilty() here, which would otherwise be called within the show()
function.
"""
canvas = MantidFigureCanvas(fig)
fig_manager = FigureManagerWorkbench(canvas, 1)
# This is only called when show() is called on the figure manager, so we have to manually call it here.
fig_manager.toolbar.set_buttons_visibility(fig)
return fig_manager.toolbar._actions['toggle_grid'].isChecked()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Nyker510/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/style/core.py | 11 | 4957 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
"""
Core functions and attributes for the matplotlib style library:
``use``
Select style sheet to override the current matplotlib settings.
``context``
Context manager to use a style sheet temporarily.
``available``
List available style sheets.
``library``
A dictionary of style names and matplotlib settings.
"""
import os
import re
import contextlib
import matplotlib as mpl
from matplotlib import cbook
from matplotlib import rc_params_from_file
__all__ = ['use', 'context', 'available', 'library', 'reload_library']
BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')
# Users may want multiple library paths, so store a list of paths.
USER_LIBRARY_PATHS = [os.path.join(mpl._get_configdir(), 'stylelib')]
STYLE_EXTENSION = 'mplstyle'
STYLE_FILE_PATTERN = re.compile('([\S]+).%s$' % STYLE_EXTENSION)
def is_style_file(filename):
"""Return True if the filename looks like a style file."""
return STYLE_FILE_PATTERN.match(filename) is not None
def use(name):
"""Use matplotlib style settings from a known style sheet or from a file.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
"""
if cbook.is_string_like(name):
name = [name]
for style in name:
if style in library:
mpl.rcParams.update(library[style])
else:
try:
rc = rc_params_from_file(style, use_default_template=False)
mpl.rcParams.update(rc)
except:
msg = ("'%s' not found in the style library and input is "
"not a valid URL or path. See `style.available` for "
"list of available styles.")
raise ValueError(msg % style)
@contextlib.contextmanager
def context(name, after_reset=False):
"""Context manager for using style settings temporarily.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
after_reset : bool
If True, apply style after resetting settings to their defaults;
otherwise, apply style on top of the current settings.
"""
initial_settings = mpl.rcParams.copy()
if after_reset:
mpl.rcdefaults()
use(name)
yield
mpl.rcParams.update(initial_settings)
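# Brief usage sketch (added here for illustration; not part of the upstream
# module). 'ggplot' is one of the style sheets shipped in BASE_LIBRARY_PATH.
#
#   import matplotlib.pyplot as plt
#   from matplotlib import style
#   with style.context('ggplot'):
#       plt.plot([1, 2, 3], [1, 4, 9])   # drawn with the ggplot style
#   plt.plot([1, 2, 3], [1, 4, 9])       # previous rcParams are restored here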
def load_base_library():
"""Load style library defined in this package."""
library = dict()
library.update(read_style_directory(BASE_LIBRARY_PATH))
return library
def iter_user_libraries():
for stylelib_path in USER_LIBRARY_PATHS:
stylelib_path = os.path.expanduser(stylelib_path)
if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
yield stylelib_path
def update_user_library(library):
"""Update style library with user-defined rc files"""
for stylelib_path in iter_user_libraries():
styles = read_style_directory(stylelib_path)
update_nested_dict(library, styles)
return library
def iter_style_files(style_dir):
"""Yield file path and name of styles in the given directory."""
for path in os.listdir(style_dir):
filename = os.path.basename(path)
if is_style_file(filename):
match = STYLE_FILE_PATTERN.match(filename)
path = os.path.abspath(os.path.join(style_dir, path))
yield path, match.groups()[0]
def read_style_directory(style_dir):
"""Return dictionary of styles defined in `style_dir`."""
styles = dict()
for path, name in iter_style_files(style_dir):
styles[name] = rc_params_from_file(path, use_default_template=False)
return styles
def update_nested_dict(main_dict, new_dict):
"""Update nested dict (only level of nesting) with new values.
Unlike dict.update, this assumes that the values of the parent dict are
dicts (or dict-like), so you shouldn't replace the nested dict if it
already exists. Instead you should update the sub-dict.
"""
# update named styles specified by user
for name, rc_dict in six.iteritems(new_dict):
if name in main_dict:
main_dict[name].update(rc_dict)
else:
main_dict[name] = rc_dict
return main_dict
# Load style library
# ==================
_base_library = load_base_library()
library = None
available = []
def reload_library():
"""Reload style library."""
global library, available
library = update_user_library(_base_library)
available[:] = library.keys()
reload_library()
| mit |
ruiting/opencog | scripts/make_benchmark_graphs.py | 56 | 3139 | #!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
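# Hand-worked illustration (hypothetical numbers, not in the original file):
# with type='simple' this is a plain windowed mean, e.g.
#   moving_average([1, 1, 1, 7, 7, 7], 3)  ->  [3., 3., 3., 3., 5., 7.]
# where the first n entries are back-filled with a[n] to hide the convolution
# ramp-up.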
def graph_file(fn,delta_rss=True):
print "Graphing " + fn
records = csv.reader(open(fn,'rb'),delimiter=",")
sizes=[]; times=[]; times_seconds=[]; memories=[]
for row in records:
sizes.append(int(row[0]))
times.append(int(row[1]))
memories.append(int(row[2]))
times_seconds.append(float(row[3]))
left, width = 0.1, 0.8
rect1 = [left, 0.5, width, 0.4] #left, bottom, width, height
rect2 = [left, 0.1, width, 0.4]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axies background color
ax1 = fig.add_axes(rect1, axisbg=axescolor)
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax1.plot(sizes,times_seconds,color='black')
if len(times_seconds) > 1000:
        ax1.plot(sizes,moving_average(times_seconds,len(times_seconds) / 100),color='blue')
if delta_rss:
oldmemories = list(memories)
for i in range(1,len(memories)): memories[i] = oldmemories[i] - oldmemories[i-1]
ax2.plot(sizes,memories,color='black')
for label in ax1.get_xticklabels():
label.set_visible(False)
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 7 ticks, pruning the upper and lower so they don't overlap
# with other ticks
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax1.yaxis.set_major_formatter(fmt)
ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax2.yaxis.set_major_formatter(fmt)
ax2.yaxis.offsetText.set_visible(False)
fig.show()
size = int(fmt.orderOfMagnitude) / 3
labels = ["B","KB","MB","GB"]
label = labels[size]
labels = ["","(10s)","(100s)"]
label += " " + labels[int(fmt.orderOfMagnitude) % 3]
ax2.set_xlabel("AtomSpace Size")
ax2.set_ylabel("RSS " + label)
ax1.set_ylabel("Time (seconds)")
ax1.set_title(fn)
fig.show()
fig.savefig(fn+".png",format="png")
files_to_graph = glob.glob("*_benchmark.csv")
for fn in files_to_graph:
graph_file(fn);
| agpl-3.0 |
bjornaa/ladim | examples/line/plot_cellcount.py | 1 | 1561 | # plot_cellcount.py
"""Count and plot number of particles in grid cell"""
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from postladim import ParticleFile, cellcount
# ---------------
# User settings
# ---------------
pfile = "line.nc" # LADiM particle file
grid_file = "../data/ocean_avg_0014.nc"
tframe0 = 20 # Start time frame
tframe1 = 95
# Subgrid definition
i0, i1 = 58, 150
j0, j1 = 60, 140
# --------------------------------
# ROMS grid, plot domain
with Dataset(grid_file) as f0:
M = f0.variables["mask_rho"][j0:j1, i0:i1]
lon = f0.variables["lon_rho"][j0:j1, i0:i1]
lat = f0.variables["lat_rho"][j0:j1, i0:i1]
# Cell centers and boundaries
Xcell = np.arange(i0, i1)
Ycell = np.arange(j0, j1)
Xb = np.arange(i0 - 0.5, i1)
Yb = np.arange(j0 - 0.5, j1)
# ---------------------------
# Read and count particles
# ---------------------------
pf = ParticleFile(pfile)
X = pf.X[tframe0:tframe1]
Y = pf.Y[tframe0:tframe1]
C = cellcount(X, Y, grid_limits=(i0, i1, j0, j1))
# -----------------------------
# Plot particle concentration
# -----------------------------
plt.set_cmap("cool")
plt.pcolormesh(Xb, Yb, C)
plt.colorbar()
# Land mask
constmap = plt.matplotlib.colors.ListedColormap([0.2, 0.6, 0.4])
M = np.ma.masked_where(M > 0, M)
plt.pcolormesh(Xb, Yb, M, cmap=constmap)
# Graticule
plt.contour(Xcell, Ycell, lat, levels=range(55, 64), colors="black", linestyles=":")
plt.contour(Xcell, Ycell, lon, levels=range(-4, 10, 2), colors="black", linestyles=":")
plt.axis("image")
plt.show()
| mit |
michaelpacer/scikit-image | doc/examples/applications/plot_geometric.py | 28 | 3253 | """
===============================
Using geometric transformations
===============================
In this example, we will see how to use geometric transformations in the context
of image processing.
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf
margins = dict(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
"""
Basics
======
Several different geometric transformation types are supported: similarity,
affine, projective and polynomial.
Geometric transformations can either be created using the explicit parameters
(e.g. scale, shear, rotation and translation) or the transformation matrix:
First we create a transformation using explicit parameters:
"""
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 2,
translation=(0, 1))
print(tform.params)
"""
Alternatively you can define a transformation by the transformation matrix
itself:
"""
matrix = tform.params.copy()
matrix[1, 2] = 2
tform2 = tf.SimilarityTransform(matrix)
"""
These transformation objects can then be used to apply forward and inverse
coordinate transformations between the source and destination coordinate
systems:
"""
coord = [1, 0]
print(tform2(coord))
print(tform2.inverse(tform(coord)))
"""
Image warping
=============
Geometric transformations can also be used to warp images:
"""
text = data.text()
tform = tf.SimilarityTransform(scale=1, rotation=math.pi / 4,
translation=(text.shape[0] / 2, -100))
rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.axis('off')
ax2.imshow(rotated)
ax2.axis('off')
ax3.imshow(back_rotated)
ax3.axis('off')
"""
.. image:: PLOT2RST.current_figure
Parameter estimation
====================
In addition to the basic functionality mentioned above you can also estimate the
parameters of a geometric transformation using the least-squares method.
This can amongst other things be used for image registration or rectification,
where you have a set of control points or homologous/corresponding points in two
images.
Let's assume we want to recognize letters on a photograph which was not taken
from the front but at a certain angle. In the simplest case of a plane paper
surface the letters are projectively distorted. Simple matching algorithms would
not be able to match such symbols. One solution to this problem would be to warp
the image so that the distortion is removed and then apply a matching algorithm:
"""
text = data.text()
src = np.array((
(0, 0),
(0, 50),
(300, 50),
(300, 0)
))
dst = np.array((
(155, 15),
(65, 40),
(260, 130),
(360, 95)
))
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
rolando/ClickSecurity-data_hacking | dga_detection/dga_model_gen.py | 6 | 13951 |
''' Build models to detect Algorithmically Generated Domain Names (DGA).
We're trying to classify domains as being 'legit' or having a high probability
of being generated by a DGA (Dynamic Generation Algorithm). We have 'legit' in
quotes as we're using the domains in Alexa as the 'legit' set.
'''
import os, sys
import traceback
import json
import optparse
import pickle
import collections
import sklearn
import sklearn.feature_extraction
import sklearn.ensemble
import sklearn.metrics
import pandas as pd
import numpy as np
import tldextract
import math
# Version printing is always a good idea
print 'Scikit Learn version: %s' % sklearn.__version__
print 'Pandas version: %s' % pd.__version__
print 'TLDExtract version: %s' % tldextract.__version__
# Version 0.12.0 of Pandas has a DeprecationWarning about Height blah that I'm ignoring
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Okay for this model we need the 2LD and nothing else
def domain_extract(uri):
ext = tldextract.extract(uri)
if (not ext.suffix):
return None
else:
return ext.domain
# Entropy calc (this must match model_eval)
def entropy(s):
p, lns = collections.Counter(s), float(len(s))
return -sum( count/lns * math.log(count/lns, 2) for count in p.values())
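# Reference values, computed by hand for illustration (not in the original
# script): entropy('google') is about 1.92 bits thanks to the repeated 'g'/'o',
# while a string of 10 distinct characters scores log2(10) ~= 3.32. DGA names
# tend to sit at the high end of this range, which is why entropy is used as a
# feature below.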
def show_cm(cm, labels):
# Compute percentanges
percent = (cm*100.0)/np.array(np.matrix(cm.sum(axis=1)).T) # Derp, I'm sure there's a better way
print 'Confusion Matrix Stats'
for i, label_i in enumerate(labels):
for j, label_j in enumerate(labels):
print "%s/%s: %.2f%% (%d/%d)" % (label_i, label_j, (percent[i][j]), cm[i][j], cm[i].sum())
def save_model_to_disk(name, model, model_dir='models'):
''' Serialize and save a model to disk'''
# First serialized the model
serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
# Model directory + model name
model_path = os.path.join(model_dir, name+'.model')
# Now store it to disk
print 'Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model)/1024.0/1024.0)
open(model_path,'wb').write(serialized_model)
def load_model_from_disk(name, model_dir='models'):
# Model directory is relative to this file
model_path = os.path.join(model_dir, name+'.model')
# Put a try/except around the model load in case it fails
try:
model = pickle.loads(open(model_path,'rb').read())
except:
print 'Could not load model: %s from directory %s!' % (name, model_path)
return None
return model
def main():
''' Main method, takes care of loading data, running it through the various analyses
and reporting the results
'''
# Handle command-line arguments
parser = optparse.OptionParser()
parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default')
(options, arguments) = parser.parse_args()
print options, arguments
try: # Pokemon exception handling
# This is the Alexa 1M domain list.
print 'Loading alexa dataframe...'
alexa_dataframe = pd.read_csv(options.alexa_file, names=['rank','uri'], header=None, encoding='utf-8')
print alexa_dataframe.info()
print alexa_dataframe.head()
# Compute the 2LD of the domain given by Alexa
alexa_dataframe['domain'] = [ domain_extract(uri) for uri in alexa_dataframe['uri']]
del alexa_dataframe['rank']
del alexa_dataframe['uri']
alexa_dataframe = alexa_dataframe.dropna()
alexa_dataframe = alexa_dataframe.drop_duplicates()
print alexa_dataframe.head()
# Set the class
alexa_dataframe['class'] = 'legit'
# Shuffle the data (important for training/testing)
alexa_dataframe = alexa_dataframe.reindex(np.random.permutation(alexa_dataframe.index))
alexa_total = alexa_dataframe.shape[0]
print 'Total Alexa domains %d' % alexa_total
# Read in the DGA domains
dga_dataframe = pd.read_csv('data/dga_domains.txt', names=['raw_domain'], header=None, encoding='utf-8')
        # We noticed that the blacklist values just differ by capitalization or .com/.org/.info
dga_dataframe['domain'] = dga_dataframe.applymap(lambda x: x.split('.')[0].strip().lower())
del dga_dataframe['raw_domain']
# It's possible we have NaNs from blanklines or whatever
dga_dataframe = dga_dataframe.dropna()
dga_dataframe = dga_dataframe.drop_duplicates()
dga_total = dga_dataframe.shape[0]
print 'Total DGA domains %d' % dga_total
# Set the class
dga_dataframe['class'] = 'dga'
print 'Number of DGA domains: %d' % dga_dataframe.shape[0]
print dga_dataframe.head()
# Concatenate the domains in a big pile!
all_domains = pd.concat([alexa_dataframe, dga_dataframe], ignore_index=True)
# Add a length field for the domain
all_domains['length'] = [len(x) for x in all_domains['domain']]
# Okay since we're trying to detect dynamically generated domains and short
# domains (length <=6) are crazy random even for 'legit' domains we're going
# to punt on short domains (perhaps just white/black list for short domains?)
all_domains = all_domains[all_domains['length'] > 6]
# Add a entropy field for the domain
all_domains['entropy'] = [entropy(x) for x in all_domains['domain']]
print all_domains.head()
# Now we compute NGrams for every Alexa domain and see if we can use the
# NGrams to help us better differentiate and mark DGA domains...
# Scikit learn has a nice NGram generator that can generate either char NGrams or word NGrams (we're using char).
# Parameters:
# - ngram_range=(3,5) # Give me all ngrams of length 3, 4, and 5
# - min_df=1e-4 # Minimumum document frequency. At 1e-4 we're saying give us NGrams that
# # happen in at least .1% of the domains (so for 100k... at least 100 domains)
alexa_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-4, max_df=1.0)
# I'm SURE there's a better way to store all the counts but not sure...
# At least the min_df parameters has already done some thresholding
counts_matrix = alexa_vc.fit_transform(alexa_dataframe['domain'])
alexa_counts = np.log10(counts_matrix.sum(axis=0).getA1())
ngrams_list = alexa_vc.get_feature_names()
# For fun sort it and show it
import operator
_sorted_ngrams = sorted(zip(ngrams_list, alexa_counts), key=operator.itemgetter(1), reverse=True)
print 'Alexa NGrams: %d' % len(_sorted_ngrams)
for ngram, count in _sorted_ngrams[:10]:
print ngram, count
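        # How to read these scores (explanatory note, not in the original script):
        # the 'alexa_grams' value computed further down is a dot product, so each
        # ngram a domain shares with the Alexa corpus contributes
        # log10(total occurrences in Alexa) per appearance (an ngram seen 1,000
        # times adds 3.0), and random-looking domains with few common ngrams
        # therefore score close to 0.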
# We're also going to throw in a bunch of dictionary words
word_dataframe = pd.read_csv('data/words.txt', names=['word'], header=None, dtype={'word': np.str}, encoding='utf-8')
# Cleanup words from dictionary
word_dataframe = word_dataframe[word_dataframe['word'].map(lambda x: str(x).isalpha())]
word_dataframe = word_dataframe.applymap(lambda x: str(x).strip().lower())
word_dataframe = word_dataframe.dropna()
word_dataframe = word_dataframe.drop_duplicates()
print word_dataframe.head(10)
# Now compute NGrams on the dictionary words
# Same logic as above...
dict_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-5, max_df=1.0)
counts_matrix = dict_vc.fit_transform(word_dataframe['word'])
dict_counts = np.log10(counts_matrix.sum(axis=0).getA1())
ngrams_list = dict_vc.get_feature_names()
# For fun sort it and show it
import operator
_sorted_ngrams = sorted(zip(ngrams_list, dict_counts), key=operator.itemgetter(1), reverse=True)
print 'Word NGrams: %d' % len(_sorted_ngrams)
for ngram, count in _sorted_ngrams[:10]:
print ngram, count
# We use the transform method of the CountVectorizer to form a vector
        # of ngrams contained in the domain; that vector is then multiplied
# by the counts vector (which is a column sum of the count matrix).
def ngram_count(domain):
alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot vector multiply and transpose Woo Hoo!
dict_match = dict_counts * dict_vc.transform([domain]).T
print '%s Alexa match:%d Dict match: %d' % (domain, alexa_match, dict_match)
# Examples:
ngram_count('google')
ngram_count('facebook')
ngram_count('1cb8a5f36f')
ngram_count('pterodactylfarts')
ngram_count('ptes9dro-dwacty2lfa5rrts')
ngram_count('beyonce')
ngram_count('bey666on4ce')
# Compute NGram matches for all the domains and add to our dataframe
all_domains['alexa_grams']= alexa_counts * alexa_vc.transform(all_domains['domain']).T
all_domains['word_grams']= dict_counts * dict_vc.transform(all_domains['domain']).T
print all_domains.head()
# Use the vectorized operations of the dataframe to investigate differences
# between the alexa and word grams
all_domains['diff'] = all_domains['alexa_grams'] - all_domains['word_grams']
# The table below shows those domain names that are more 'dictionary' and less 'web'
print all_domains.sort(['diff'], ascending=True).head(10)
# The table below shows those domain names that are more 'web' and less 'dictionary'
# Good O' web....
print all_domains.sort(['diff'], ascending=False).head(50)
# Lets look at which Legit domains are scoring low on both alexa and word gram count
weird_cond = (all_domains['class']=='legit') & (all_domains['word_grams']<3) & (all_domains['alexa_grams']<2)
weird = all_domains[weird_cond]
print weird.shape[0]
print weird.head(10)
# Epiphany... Alexa really may not be the best 'exemplar' set...
# (probably a no-shit moment for everyone else :)
#
        # Discussion: If you're using these as exemplars of NOT DGA, then you're probably
# making things very hard on your machine learning algorithm.
# Perhaps we should have two categories of Alexa domains, 'legit'
# and a 'weird'. based on some definition of weird.
# Looking at the entries above... we have approx 80 domains
# that we're going to mark as 'weird'.
#
all_domains.loc[weird_cond, 'class'] = 'weird'
print all_domains['class'].value_counts()
all_domains[all_domains['class'] == 'weird'].head()
# Perhaps we will just exclude the weird class from our ML training
not_weird = all_domains[all_domains['class'] != 'weird']
X = not_weird.as_matrix(['length', 'entropy', 'alexa_grams', 'word_grams'])
# Labels (scikit learn uses 'y' for classification labels)
y = np.array(not_weird['class'].tolist())
# Random Forest is a popular ensemble machine learning classifier.
# http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=20, compute_importances=True) # Trees in the forest
# Train on a 80/20 split
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Now plot the results of the holdout set in a confusion matrix
labels = ['legit', 'dga']
cm = sklearn.metrics.confusion_matrix(y_test, y_pred, labels)
show_cm(cm, labels)
# We can also look at what features the learning algorithm thought were the most important
importances = zip(['length', 'entropy', 'alexa_grams', 'word_grams'], clf.feature_importances_)
print importances
# Now train on the whole thing before doing tests and saving models to disk
clf.fit(X, y)
# test_it shows how to do evaluation, also fun for manual testing below :)
def test_it(domain):
_alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot matrix multiply and transpose Woo Hoo!
_dict_match = dict_counts * dict_vc.transform([domain]).T
_X = [len(domain), entropy(domain), _alexa_match, _dict_match]
print '%s : %s' % (domain, clf.predict(_X)[0])
# Examples (feel free to change these and see the results!)
test_it('google')
test_it('google88')
test_it('facebook')
test_it('1cb8a5f36f')
test_it('pterodactylfarts')
test_it('ptes9dro-dwacty2lfa5rrts')
test_it('beyonce')
test_it('bey666on4ce')
test_it('supersexy')
test_it('yourmomissohotinthesummertime')
test_it('35-sdf-09jq43r')
test_it('clicksecurity')
# Serialize model to disk
save_model_to_disk('dga_model_random_forest', clf)
save_model_to_disk('dga_model_alexa_vectorizor', alexa_vc)
save_model_to_disk('dga_model_alexa_counts', alexa_counts)
save_model_to_disk('dga_model_dict_vectorizor', dict_vc)
save_model_to_disk('dga_model_dict_counts', dict_counts)
except KeyboardInterrupt:
print 'Goodbye Cruel World...'
sys.exit(0)
except Exception, error:
traceback.print_exc()
print '(Exception):, %s' % (str(error))
sys.exit(1)
if __name__ == '__main__':
main() | mit |
nelango/ViralityAnalysis | model/lib/pandas/core/nanops.py | 9 | 23144 | import itertools
import functools
import numpy as np
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
ensure_float, _ensure_float64,
_ensure_int64, _ensure_object,
is_float, is_integer, is_complex,
is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype, _get_dtype,
is_int_or_datetime_dtype, is_any_int_dtype,
_int64_max)
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
try:
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e)
raise
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
# wrap the 0's if needed
if is_timedelta64_dtype(values):
return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
try:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(values):
raise TypeError(e)
raise
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and
not is_datetime_or_timedelta_dtype(dt)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError) as e:
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return _int64_max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = _maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isnull(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = lib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
dtype_count = np.float64
if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if not is_float_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return _wrap_results(np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
dtype = _get_dtype(dtype)
count = _get_counts(mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
dtype = values.dtype
mask = isnull(values)
if is_any_int_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch()
def reduction(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(
values,
skipna,
fill_value_typ=fill_value_typ,
)
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except:
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
reduction.__name__ = 'nan' + meth
return reduction
nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8','m8')
def nanskew(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt(count * count - count) * C) /
((count - typ(2)) * np.sqrt(B) ** typ(3)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8','m8')
def nankurt(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
D = (values ** 4).sum(axis) / count - A ** typ(4) - typ(6) * B * A * A - typ(4) * C * A
B = _zero_out_fperr(B)
D = _zero_out_fperr(D)
if not isinstance(B, np.ndarray):
# if B is a scalar, check these corner cases first before doing division
if count < 4:
return np.nan
if B == 0:
return 0
result = (((count * count - typ(1)) * D / (B * B) - typ(3) * ((count - typ(1)) ** typ(2))) /
((count - typ(2)) * (count - typ(3))))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow('M8','m8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
dtype = _get_dtype(dtype)
if axis is None:
return dtype.type(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
if np.isscalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(result, axis, mask):
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
elif result is not tslib.NaT:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8','m8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8','m8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
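# Illustrative usage (hypothetical arrays, added for clarity; not part of the
# original source): both helpers drop pairwise-incomplete observations first.
#   a = np.array([1.0, 2.0, np.nan, 4.0])
#   b = np.array([1.5, np.nan, 3.0, 4.5])
#   nancorr(a, b)                     # Pearson correlation of the 2 complete pairs
#   nancorr(a, b, method='spearman')  # rank correlation via scipy.stats.spearmanr
#   nancov(a, b, min_periods=3)       # NaN, only 2 complete pairs remain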
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
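# Note (added for clarity, not in the original source): the generated wrappers
# upcast a boolean result to object dtype whenever either operand has missing
# values, so NA positions carry np.nan rather than a misleading True/False.
# For example (values shown for illustration only):
#   nangt(np.array([1.0, np.nan]), np.array([0.0, 0.0]))
#   # -> array([True, nan], dtype=object)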
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(_ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.timedelta64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
| mit |
nelango/ViralityAnalysis | model/lib/pandas/tseries/util.py | 9 | 2955 | from pandas.compat import range, lrange
import numpy as np
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
def pivot_annual(series, freq=None):
"""
Group a series by years, taking leap years into account.
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
For example, if the initial series has a daily frequency, the 59th column
of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
and the 60th column is masked for non-leap years.
With an hourly initial frequency, the (59*24)th column of the output always
corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
the 24 columns between (59*24) and (61*24) are masked.
If the original frequency is less than daily, the output is equivalent to
``series.convert('A', func=None)``.
Parameters
----------
series : Series
freq : string or None, default None
Returns
-------
annual : DataFrame
"""
index = series.index
year = index.year
years = nanops.unique1d(year)
if freq is not None:
freq = freq.upper()
else:
freq = series.index.freq
if freq == 'D':
width = 366
offset = index.dayofyear - 1
# adjust for leap year
offset[(~isleapyear(year)) & (offset >= 59)] += 1
columns = lrange(1, 367)
# todo: strings like 1/1, 1/25, etc.?
elif freq in ('M', 'BM'):
width = 12
offset = index.month - 1
columns = lrange(1, 13)
elif freq == 'H':
width = 8784
grouped = series.groupby(series.index.year)
defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
defaulted.index = defaulted.index.droplevel(0)
offset = np.asarray(defaulted.index)
offset[~isleapyear(year) & (offset >= 1416)] += 24
columns = lrange(1, 8785)
else:
raise NotImplementedError(freq)
flat_index = (year - years.min()) * width + offset
flat_index = com._ensure_platform_int(flat_index)
values = np.empty((len(years), width))
values.fill(np.nan)
values.put(flat_index, series.values)
return DataFrame(values, index=years, columns=columns)
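# Illustrative sketch (added for clarity, not part of the original module).
# The series, dates and expectations below are hypothetical.
def _pivot_annual_example():
    import numpy as np
    import pandas as pd
    # two calendar years of daily data; 2000 is a leap year
    s = pd.Series(np.arange(731.0),
                  index=pd.date_range('2000-01-01', periods=731, freq='D'))
    table = pivot_annual(s, freq='D')
    # table has one row per year and 366 columns (Jan 1st .. Dec 31st);
    # the column labelled 60 (Feb 29th) is NaN for the non-leap year 2001.
    return table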
def isleapyear(year):
"""
Returns true if year is a leap year.
Parameters
----------
year : integer / sequence
A given (list of) year(s).
"""
year = np.asarray(year)
return np.logical_or(year % 400 == 0,
np.logical_and(year % 4 == 0, year % 100 > 0))
| mit |
ben-hopps/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use matplotlib.widgets.lock(someobj) to prevent other widgets from drawing.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
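# Illustrative usage sketch (added for clarity, not part of the original
# module); the figure layout and callback below are hypothetical.
def _button_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax_button = fig.add_axes([0.4, 0.05, 0.2, 0.075])
    button = Button(ax_button, 'Reset')
    def on_click(event):
        print('button clicked')
    cid = button.on_clicked(on_click)   # keep cid to disconnect later
    return button, cid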
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
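# Illustrative usage sketch (added for clarity, not part of the original
# module); the axes rectangle and callback below are hypothetical.
def _slider_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax_freq = fig.add_axes([0.25, 0.1, 0.65, 0.03])
    sfreq = Slider(ax_freq, 'freq', 0.1, 30.0, valinit=3.0)
    def update(val):
        # val is the new slider position passed in by on_changed
        fig.canvas.draw()
    sfreq.on_changed(update)
    return sfreq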
class CheckButtons(Widget):
"""
A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of fig
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
A horizontal and vertical line that span the axes and move with
the pointer. You can turn off the hline or vline respectively with
the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with the visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a rectangular region of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the span and call onselect with
onselect(pos_1, pos_2)
and clear the drawn box/line. The two pos_i are arrays of length 2
containing the x- and y-coordinates.
If minspanx is not None then events smaller than minspanx
in the x direction are ignored (it's the same for y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use drawtype if you want the mouse to draw a line, a box or nothing
between click and actual position by setting
drawtype = 'line', drawtype='box' or drawtype = 'none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspany will be interpreted in the same coordinates as
the x and y axis; if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
# make the drawed box/line visible get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
if (self.drawtype=='box') and (xproblems or yproblems):
"""Box to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
if (self.drawtype=='line') and (xproblems and yproblems):
"""Line to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # inital values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position (with
# (button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
| agpl-3.0 |
soulmachine/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
jseabold/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
belteshassar/cartopy | lib/cartopy/mpl/feature_artist.py | 3 | 6435 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
This module defines the :class:`FeatureArtist` class, for drawing
:class:`Feature` instances with matplotlib.
"""
from __future__ import (absolute_import, division, print_function)
import warnings
import weakref
import matplotlib.artist
import matplotlib.collections
import cartopy.mpl.patch as cpatch
class _GeomKey(object):
"""
Provide id() based equality and hashing for geometries.
Instances of this class must be treated as immutable for the caching
to operate correctly.
A workaround for Shapely polygons no longer being hashable as of 1.5.13.
"""
def __init__(self, geom):
self._id = id(geom)
def __eq__(self, other):
return self._id == other._id
def __hash__(self):
return hash(self._id)
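# Note (added for clarity, not in the original source): keying on id() means
# two keys are equal only when they wrap the very same geometry object, e.g.
# (hypothetical geometries)
#   _GeomKey(geom) == _GeomKey(geom)        # True  -- same underlying object
#   _GeomKey(geom) == _GeomKey(geom_copy)   # False, even if the shapes match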
class FeatureArtist(matplotlib.artist.Artist):
"""
A subclass of :class:`~matplotlib.artist.Artist` capable of
drawing a :class:`cartopy.feature.Feature`.
"""
_geom_key_to_geometry_cache = weakref.WeakValueDictionary()
"""
A mapping from _GeomKey to geometry to assist with the caching of
transformed matplotlib paths.
"""
_geom_key_to_path_cache = weakref.WeakKeyDictionary()
"""
A nested mapping from geometry (converted to a _GeomKey) and target
projection to the resulting transformed matplotlib paths::
{geom: {target_projection: list_of_paths}}
This provides a significant boost when producing multiple maps of the
same projection.
"""
def __init__(self, feature, **kwargs):
"""
Args:
* feature:
an instance of :class:`cartopy.feature.Feature` to draw.
* kwargs:
keyword arguments to be used when drawing the feature. These
will override those shared with the feature.
"""
super(FeatureArtist, self).__init__()
if kwargs is None:
kwargs = {}
self._kwargs = dict(kwargs)
# Set default zorder so that features are drawn before
# lines e.g. contours but after images.
# Note that the zorder of Patch, PatchCollection and PathCollection
# are all 1 by default. Assuming equal zorder drawing takes place in
# the following order: collections, patches, lines (default zorder=2),
# text (default zorder=3), then other artists e.g. FeatureArtist.
if self._kwargs.get('zorder') is not None:
self.set_zorder(self._kwargs['zorder'])
elif feature.kwargs.get('zorder') is not None:
self.set_zorder(feature.kwargs['zorder'])
else:
# The class attribute matplotlib.collections.PathCollection.zorder
# was removed after mpl v1.2.0, so the hard-coded value of 1 is
# used instead.
self.set_zorder(1)
self._feature = feature
@matplotlib.artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
"""
Draws the geometries of the feature that intersect with the extent of
the :class:`cartopy.mpl.GeoAxes` instance to which this
object has been added.
"""
if not self.get_visible():
return
ax = self.axes
feature_crs = self._feature.crs
# Get geometries that we need to draw.
extent = None
try:
extent = ax.get_extent(feature_crs)
except ValueError:
warnings.warn('Unable to determine extent. Defaulting to global.')
geoms = self._feature.intersecting_geometries(extent)
# Project (if necessary) and convert geometries to matplotlib paths.
paths = []
key = ax.projection
for geom in geoms:
# As Shapely geometries cannot be relied upon to be
# hashable, we have to use a WeakValueDictionary to manage
# their weak references. The key can then be a simple,
# "disposable", hashable geom-key object that just uses the
# id() of a geometry to determine equality and hash value.
# The only persistent, strong reference to the geom-key is
# in the WeakValueDictionary, so when the geometry is
# garbage collected so is the geom-key.
# The geom-key is also used to access the WeakKeyDictionary
# cache of transformed geometries. So when the geom-key is
# garbage collected so are the transformed geometries.
geom_key = _GeomKey(geom)
FeatureArtist._geom_key_to_geometry_cache.setdefault(
geom_key, geom)
mapping = FeatureArtist._geom_key_to_path_cache.setdefault(
geom_key, {})
geom_paths = mapping.get(key)
if geom_paths is None:
if ax.projection != feature_crs:
projected_geom = ax.projection.project_geometry(
geom, feature_crs)
else:
projected_geom = geom
geom_paths = cpatch.geos_to_path(projected_geom)
mapping[key] = geom_paths
paths.extend(geom_paths)
# Build path collection and draw it.
transform = ax.projection._as_mpl_transform(ax)
# Combine all the keyword args in priority order
final_kwargs = dict(self._feature.kwargs)
final_kwargs.update(self._kwargs)
final_kwargs.update(kwargs)
c = matplotlib.collections.PathCollection(paths,
transform=transform,
**final_kwargs)
c.set_clip_path(ax.patch)
c.set_figure(ax.figure)
return c.draw(renderer)
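# Illustrative note (added for clarity, not part of the original module):
# FeatureArtist instances are normally created indirectly via GeoAxes, e.g.
# (hypothetical script)
#   import matplotlib.pyplot as plt
#   import cartopy.crs as ccrs
#   import cartopy.feature as cfeature
#   ax = plt.axes(projection=ccrs.PlateCarree())
#   ax.add_feature(cfeature.COASTLINE, edgecolor='gray')  # wraps a FeatureArtist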
| gpl-3.0 |
xwolf12/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
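# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the '__' syntax
# described in the Pipeline docstring addresses nested step parameters, and how
# make_pipeline derives step names from the lowercased estimator class names.
# Shown as a comment because this module uses relative imports and is not meant
# to be executed directly.
#
#     from sklearn.pipeline import make_pipeline
#     from sklearn.preprocessing import StandardScaler
#     from sklearn.linear_model import LogisticRegression
#
#     pipe = make_pipeline(StandardScaler(), LogisticRegression())
#     # step names are 'standardscaler' and 'logisticregression'
#     pipe.set_params(logisticregression__C=10.0)
#     pipe.get_params()['logisticregression__C']   # -> 10.0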
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/segmentation/plot_peak_local_max.py | 6 | 1443 | """
====================
Finding local maxima
====================
The ``peak_local_max`` function returns the coordinates of local peaks (maxima)
in an image. A maximum filter is used for finding local maxima. This operation
dilates the original image and merges neighboring local maxima closer than the
size of the dilation. Locations where the original image is equal to the
dilated image are returned as local maxima.
"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
from skimage import data, img_as_float
im = img_as_float(data.coins())
# image_max is the dilation of im with a 20*20 structuring element
# It is used within peak_local_max function
image_max = ndi.maximum_filter(im, size=20, mode='constant')
# Comparison between image_max and im to find the coordinates of local maxima
coordinates = peak_local_max(im, min_distance=20)
# display results
fig, ax = plt.subplots(1, 3, figsize=(8, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1, ax2, ax3 = ax.ravel()
ax1.imshow(im, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('Original')
ax2.imshow(image_max, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('Maximum filter')
ax3.imshow(im, cmap=plt.cm.gray)
ax3.autoscale(False)
ax3.plot(coordinates[:, 1], coordinates[:, 0], 'r.')
ax3.axis('off')
ax3.set_title('Peak local max')
fig.tight_layout()
plt.show()
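# ---------------------------------------------------------------------------
# Illustrative check (not part of the original example): the docstring above
# describes peaks as "locations where the original image is equal to the
# dilated image".  A rough manual version of that comparison, without the
# min_distance/threshold handling that peak_local_max performs, looks like:
import numpy as np

manual_mask = (im == image_max)
manual_coordinates = np.column_stack(np.nonzero(manual_mask))
print("candidate peaks from the raw comparison:", len(manual_coordinates))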
| bsd-3-clause |
cl4rke/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
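# Illustrative addition (not part of the original example): the fitted RFECV
# object also exposes which features were kept (support_) and the elimination
# ranking (ranking_, where 1 marks a selected feature).
print("Selected feature mask:", rfecv.support_)
print("Feature ranking:", rfecv.ranking_)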
| bsd-3-clause |
lambdaloop/htm-challenge | brainsquared/analytics/debug/plot_signal_mu.py | 2 | 2960 | # #!/usr/bin/env python2
from cloudbrain.subscribers.PikaSubscriber import PikaSubscriber
from cloudbrain.settings import RABBITMQ_ADDRESS
import matplotlib.pyplot as plt
import numpy as np
import time
import json
import mne
from scipy import signal
device_id = "brainsquared"
device_name = "openbci"
metric = 'mu'
host = RABBITMQ_ADDRESS
buffer_size = 100
fig = plt.figure()
ax = fig.add_subplot(111)
N_points = 128
extra = 0
data = [0 for i in range(N_points)]
data2 = [0 for i in range(N_points)]
# data[0] = 0
# data[1] = 0
count = 0
b1, a1 = signal.iirfilter(1, [59.0/125.0, 61.0/125.0], btype='bandstop')
b2, a2 = signal.iirfilter(1, 3.0/125.0, btype='highpass')
# some X and Y data
x = np.arange(N_points)
y = data
# lis = [None] * 8
li, = ax.plot(x, y)
li2, = ax.plot(x, y)
# draw and show it
fig.canvas.draw()
plt.show(block=False)
def update_plot():
global data, data2, b, a
# plt.clf()
# set the new data
data_f = data
# data_f = signal.lfilter(b1, a1, data_f)
# data_f = signal.lfilter(b2, a2, data_f)
data_f = data_f[-N_points:]
# data_f = data
li.set_ydata(data)
li2.set_ydata(data2)
ax.relim()
ax.autoscale_view(True,True,True)
fig.canvas.draw()
# plt.clf()
# spec = plt.specgram(data_f, NFFT=128, noverlap=32, Fs=250, detrend='mean', pad_to=256,
# scale_by_freq=True)
# # X = mne.time_frequency.stft(data, wsize)
# # freqs = mne.time_frequency.stftfreq(wsize, sfreq=fs)
# # imshow(np.log(abs(X[0])), aspect='auto',
# # origin='lower', interpolation="None",
# # vmin=-14, vmax=-4,
# # extent=[0, 4, 0, max(freqs)])
# # ylim([0, 60])
plt.draw()
# # draw()
# time.sleep(0.001)
plt.pause(0.0001) #add this it will be OK.
def consume_eeg(connection,deliver,properties,msg_s):
global data, data2, count
# print(msg_s)
msg = json.loads(msg_s)
d = []
# for row in msg[-32:]:
# # print(row)
# d.append(float(row['channel_0']))
print(msg['left'], msg['right'])
# print(len(msg))
data.append(msg['left'])
data2.append(msg['right'])
data = data[-N_points-extra:]
data2 = data2[-N_points-extra:]
update_plot()
eeg_subscriber = PikaSubscriber(device_name, device_id, host, metric)
eeg_subscriber.connect()
eeg_subscriber.consume_messages(consume_eeg)
# # loop to update the data
# while True:
# try:
# try:
# x = float(s.readline().strip())
# except ValueError:
# continue
# data.append(x)
# data = data[-100:]
# # set the new data
# li.set_ydata(data)
# ax.relim()
# ax.autoscale_view(True,True,True)
# fig.canvas.draw()
# # time.sleep(0.001)
# plt.pause(0.0001) #add this it will be OK.
# except KeyboardInterrupt:
# break
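# ---------------------------------------------------------------------------
# Illustrative note (not part of the original script): b1/a1 and b2/a2 above
# are a 60 Hz band-stop and a 3 Hz high-pass designed for a 250 Hz sampling
# rate (the cutoffs are given as fractions of the 125 Hz Nyquist frequency).
# The commented-out lines in update_plot() show how they would be applied:
#
#     data_f = signal.lfilter(b1, a1, data_f)   # remove mains interference
#     data_f = signal.lfilter(b2, a2, data_f)   # remove DC offset / slow drift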
| agpl-3.0 |
btabibian/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 79 | 2189 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
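# Illustrative check (not part of the original example): as described in the
# docstring, the first-layer weight matrix has shape
# (n_features, hidden_layer_sizes[0]) -- here (784, 50) -- so each column can
# be reshaped into a 28x28 image.
print("weight matrix shapes:", [coef.shape for coef in mlp.coefs_])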
| bsd-3-clause |
fzalkow/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
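# Illustrative check (not part of the original script): pointwise, the
# decomposition should give error(x) ~ bias^2(x) + variance(x) + noise(x).
# The arrays below belong to the last estimator of the loop above, and the
# match is only approximate because the expectations are estimated from
# n_repeat random draws.
print("max pointwise decomposition gap:",
      np.max(np.abs(y_error - (y_bias + y_var + y_noise))))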
| bsd-3-clause |
apache/beam | sdks/python/apache_beam/dataframe/io_test.py | 5 | 12307 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import importlib
import math
import os
import platform
import shutil
import tempfile
import typing
import unittest
from datetime import datetime
from io import BytesIO
from io import StringIO
import pandas as pd
import pandas.testing
import pytest
from pandas.testing import assert_frame_equal
from parameterized import parameterized
import apache_beam as beam
from apache_beam.dataframe import convert
from apache_beam.dataframe import io
from apache_beam.io import restriction_trackers
from apache_beam.testing.util import assert_that
class MyRow(typing.NamedTuple):
timestamp: int
value: int
@unittest.skipIf(platform.system() == 'Windows', 'BEAM-10929')
class IOTest(unittest.TestCase):
def setUp(self):
self._temp_roots = []
def tearDown(self):
for root in self._temp_roots:
shutil.rmtree(root)
def temp_dir(self, files=None):
dir = tempfile.mkdtemp(prefix='beam-test')
self._temp_roots.append(dir)
if files:
for name, contents in files.items():
with open(os.path.join(dir, name), 'w') as fout:
fout.write(contents)
return dir + os.path.sep
def read_all_lines(self, pattern, delete=False):
for path in glob.glob(pattern):
with open(path) as fin:
# TODO(Py3): yield from
for line in fin:
yield line.rstrip('\n')
if delete:
os.remove(path)
def test_read_write_csv(self):
input = self.temp_dir({'1.csv': 'a,b\n1,2\n', '2.csv': 'a,b\n3,4\n'})
output = self.temp_dir()
with beam.Pipeline() as p:
df = p | io.read_csv(input + '*.csv')
df['c'] = df.a + df.b
df.to_csv(output + 'out.csv', index=False)
self.assertCountEqual(['a,b,c', '1,2,3', '3,4,7'],
set(self.read_all_lines(output + 'out.csv*')))
@pytest.mark.uses_pyarrow
def test_read_write_parquet(self):
self._run_read_write_test(
'parquet', {}, {}, dict(check_index=False), ['pyarrow'])
@parameterized.expand([
('csv', dict(index_col=0)),
('csv', dict(index_col=0, splittable=True)),
('json', dict(orient='index'), dict(orient='index')),
('json', dict(orient='columns'), dict(orient='columns')),
('json', dict(orient='split'), dict(orient='split')),
(
'json',
dict(orient='values'),
dict(orient='values'),
dict(check_index=False, check_names=False)),
(
'json',
dict(orient='records'),
dict(orient='records'),
dict(check_index=False)),
(
'json',
dict(orient='records', lines=True),
dict(orient='records', lines=True),
dict(check_index=False)),
('html', dict(index_col=0), {}, {}, ['lxml']),
('excel', dict(index_col=0), {}, {}, ['openpyxl', 'xlrd']),
])
# pylint: disable=dangerous-default-value
def test_read_write(
self,
format,
read_kwargs={},
write_kwargs={},
check_options={},
requires=()):
self._run_read_write_test(
format, read_kwargs, write_kwargs, check_options, requires)
# pylint: disable=dangerous-default-value
def _run_read_write_test(
self,
format,
read_kwargs={},
write_kwargs={},
check_options={},
requires=()):
for module in requires:
try:
importlib.import_module(module)
except ImportError:
raise unittest.SkipTest('Missing dependency: %s' % module)
small = pd.DataFrame({'label': ['11a', '37a', '389a'], 'rank': [0, 1, 2]})
big = pd.DataFrame({'number': list(range(1000))})
big['float'] = big.number.map(math.sqrt)
big['text'] = big.number.map(lambda n: 'f' + 'o' * n)
def frame_equal_to(expected_, check_index=True, check_names=True):
def check(actual):
expected = expected_
try:
actual = pd.concat(actual)
if not check_index:
expected = expected.sort_values(list(
expected.columns)).reset_index(drop=True)
actual = actual.sort_values(list(
actual.columns)).reset_index(drop=True)
if not check_names:
actual = actual.rename(
columns=dict(zip(actual.columns, expected.columns)))
return assert_frame_equal(expected, actual, check_like=True)
except:
print("EXPECTED")
print(expected)
print("ACTUAL")
print(actual)
raise
return check
for df in (small, big):
with tempfile.TemporaryDirectory() as dir:
dest = os.path.join(dir, 'out')
try:
with beam.Pipeline() as p:
deferred_df = convert.to_dataframe(
p | beam.Create([df[::3], df[1::3], df[2::3]]), proxy=df[:0])
# This does the write.
getattr(deferred_df, 'to_%s' % format)(dest, **write_kwargs)
with beam.Pipeline() as p:
# Now do the read.
# TODO(robertwb): Allow reading from pcoll of paths to do it all in
# one pipeline.
result = convert.to_pcollection(
p | getattr(io, 'read_%s' % format)(dest + '*', **read_kwargs),
yield_elements='pandas')
assert_that(result, frame_equal_to(df, **check_options))
except:
os.system('head -n 100 ' + dest + '*')
raise
def _run_truncating_file_handle_test(
self, s, splits, delim=' ', chunk_size=10):
split_results = []
next_range = restriction_trackers.OffsetRange(0, len(s))
for split in list(splits) + [None]:
tracker = restriction_trackers.OffsetRestrictionTracker(next_range)
handle = io._TruncatingFileHandle(
StringIO(s), tracker, splitter=io._DelimSplitter(delim, chunk_size))
data = ''
chunk = handle.read(1)
if split is not None:
_, next_range = tracker.try_split(split)
while chunk:
data += chunk
chunk = handle.read(7)
split_results.append(data)
return split_results
def test_truncating_filehandle(self):
self.assertEqual(
self._run_truncating_file_handle_test('a b c d e', [0.5]),
['a b c ', 'd e'])
self.assertEqual(
self._run_truncating_file_handle_test('aaaaaaaaaaaaaaXaaa b', [0.5]),
['aaaaaaaaaaaaaaXaaa ', 'b'])
self.assertEqual(
self._run_truncating_file_handle_test(
'aa bbbbbbbbbbbbbbbbbbbbbbbbbb ccc ', [0.01, 0.5]),
['aa ', 'bbbbbbbbbbbbbbbbbbbbbbbbbb ', 'ccc '])
numbers = 'x'.join(str(k) for k in range(1000))
splits = self._run_truncating_file_handle_test(
numbers, [0.1] * 20, delim='x')
self.assertEqual(numbers, ''.join(splits))
    self.assertTrue(all(s.endswith('x') for s in splits[:-1]))
self.assertLess(max(len(s) for s in splits), len(numbers) * 0.9 + 10)
self.assertGreater(
min(len(s) for s in splits), len(numbers) * 0.9**20 * 0.1)
@parameterized.expand([
('defaults', dict()),
('header', dict(header=1)),
('multi_header', dict(header=[0, 1])),
('multi_header', dict(header=[0, 1, 4])),
('names', dict(names=('m', 'n', 'o'))),
('names_and_header', dict(names=('m', 'n', 'o'), header=0)),
('skip_blank_lines', dict(header=4, skip_blank_lines=True)),
('skip_blank_lines', dict(header=4, skip_blank_lines=False)),
('comment', dict(comment='X', header=4)),
('comment', dict(comment='X', header=[0, 3])),
('skiprows', dict(skiprows=0, header=[0, 1])),
('skiprows', dict(skiprows=[1], header=[0, 3], skip_blank_lines=False)),
('skiprows', dict(skiprows=[0, 1], header=[0, 1], comment='X')),
])
def test_csv_splitter(self, name, kwargs):
def assert_frame_equal(expected, actual):
try:
pandas.testing.assert_frame_equal(expected, actual)
except AssertionError:
print("Expected:\n", expected)
print("Actual:\n", actual)
raise
def read_truncated_csv(start, stop):
return pd.read_csv(
io._TruncatingFileHandle(
BytesIO(contents.encode('ascii')),
restriction_trackers.OffsetRestrictionTracker(
restriction_trackers.OffsetRange(start, stop)),
splitter=io._CsvSplitter((), kwargs, read_chunk_size=7)),
index_col=0,
**kwargs)
contents = '''
a0, a1, a2
b0, b1, b2
X , c1, c2
e0, e1, e2
f0, f1, f2
w, daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaata, w
x, daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaata, x
y, daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaata, y
z, daaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaata, z
'''.strip()
expected = pd.read_csv(StringIO(contents), index_col=0, **kwargs)
one_shard = read_truncated_csv(0, len(contents))
assert_frame_equal(expected, one_shard)
equal_shards = pd.concat([
read_truncated_csv(0, len(contents) // 2),
read_truncated_csv(len(contents) // 2, len(contents)),
])
assert_frame_equal(expected, equal_shards)
three_shards = pd.concat([
read_truncated_csv(0, len(contents) // 3),
read_truncated_csv(len(contents) // 3, len(contents) * 2 // 3),
read_truncated_csv(len(contents) * 2 // 3, len(contents)),
])
assert_frame_equal(expected, three_shards)
# https://github.com/pandas-dev/pandas/issues/38292
if not isinstance(kwargs.get('header'), list):
split_in_header = pd.concat([
read_truncated_csv(0, 1),
read_truncated_csv(1, len(contents)),
])
assert_frame_equal(expected, split_in_header)
if not kwargs:
# Make sure we're correct as we cross the header boundary.
# We don't need to do this for every permutation.
header_end = contents.index('a2') + 3
for split in range(header_end - 2, header_end + 2):
split_at_header = pd.concat([
read_truncated_csv(0, split),
read_truncated_csv(split, len(contents)),
])
assert_frame_equal(expected, split_at_header)
def test_file_not_found(self):
with self.assertRaisesRegex(FileNotFoundError, r'/tmp/fake_dir/\*\*'):
with beam.Pipeline() as p:
_ = p | io.read_csv('/tmp/fake_dir/**')
def test_windowed_write(self):
output = self.temp_dir()
with beam.Pipeline() as p:
pc = (
p | beam.Create([MyRow(timestamp=i, value=i % 3) for i in range(20)])
| beam.Map(lambda v: beam.window.TimestampedValue(v, v.timestamp)).
with_output_types(MyRow)
| beam.WindowInto(
beam.window.FixedWindows(10)).with_output_types(MyRow))
deferred_df = convert.to_dataframe(pc)
deferred_df.to_csv(output + 'out.csv', index=False)
first_window_files = (
f'{output}out.csv-'
f'{datetime.utcfromtimestamp(0).isoformat()}*')
self.assertCountEqual(
['timestamp,value'] + [f'{i},{i%3}' for i in range(10)],
set(self.read_all_lines(first_window_files, delete=True)))
second_window_files = (
f'{output}out.csv-'
f'{datetime.utcfromtimestamp(10).isoformat()}*')
self.assertCountEqual(
['timestamp,value'] + [f'{i},{i%3}' for i in range(10, 20)],
set(self.read_all_lines(second_window_files, delete=True)))
# Check that we've read (and removed) every output file
self.assertEqual(len(glob.glob(f'{output}out.csv*')), 0)
if __name__ == '__main__':
unittest.main()
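# ---------------------------------------------------------------------------
# Illustrative standalone sketch (not part of the test suite): the pattern
# exercised by test_read_write_csv above, outside of unittest -- read CSV
# files into a deferred DataFrame, derive a column, and write the result.
# The paths are placeholders.
#
#     with beam.Pipeline() as p:
#         df = p | io.read_csv('/tmp/in*.csv')
#         df['c'] = df.a + df.b
#         df.to_csv('/tmp/out.csv', index=False)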
| apache-2.0 |
Jozhogg/iris | lib/iris/tests/test_mapping.py | 1 | 8078 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Tests map creation.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import range
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import numpy.testing as np_testing
import cartopy.crs as ccrs
import iris
import iris.coord_systems
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
# A specific cartopy Globe matching the iris RotatedGeogCS default.
_DEFAULT_GLOBE = ccrs.Globe(semimajor_axis=6371229.0,
semiminor_axis=6371229.0,
ellipse=None)
@tests.skip_plot
class TestBasic(tests.GraphicsTest):
cube = iris.tests.stock.realistic_4d()
def test_contourf(self):
cube = self.cube[0, 0]
iplt.contourf(cube)
self.check_graphic()
def test_pcolor(self):
cube = self.cube[0, 0]
iplt.pcolor(cube)
self.check_graphic()
def test_unmappable(self):
cube = self.cube[0, 0]
cube.coord('grid_longitude').standard_name = None
iplt.contourf(cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5,
globe=_DEFAULT_GLOBE))
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(3.59579163e+02, 3.59669159e+02, -1.28250003e-01, -3.82499993e-02),
decimal=3)
@tests.skip_data
@tests.skip_plot
class TestUnmappable(tests.GraphicsTest):
def setUp(self):
src_cube = iris.tests.stock.global_pp()
# Make a cube that can't be located on the globe.
cube = iris.cube.Cube(src_cube.data)
cube.add_dim_coord(
iris.coords.DimCoord(np.arange(96, dtype=np.float32) * 100,
long_name='x', units='m'),
1)
cube.add_dim_coord(
iris.coords.DimCoord(np.arange(73, dtype=np.float32) * 100,
long_name='y', units='m'),
0)
cube.standard_name = 'air_temperature'
cube.units = 'K'
cube.assert_valid()
self.cube = cube
def test_simple(self):
iplt.contourf(self.cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestMappingSubRegion(tests.GraphicsTest):
def setUp(self):
cube_path = tests.get_data_path(
('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
cube = iris.load_cube(cube_path)[0]
# make the data smaller to speed things up.
self.cube = cube[::10, ::10]
def test_simple(self):
# First sub-plot
plt.subplot(221)
plt.title('Default')
iplt.contourf(self.cube)
plt.gca().coastlines()
# Second sub-plot
plt.subplot(222, projection=ccrs.Mollweide(central_longitude=120))
plt.title('Molleweide')
iplt.contourf(self.cube)
plt.gca().coastlines()
# Third sub-plot (the projection part is redundant, but a useful
        # test nonetheless)
ax = plt.subplot(223, projection=iplt.default_projection(self.cube))
plt.title('Native')
iplt.contour(self.cube)
ax.coastlines()
# Fourth sub-plot
ax = plt.subplot(2, 2, 4, projection=ccrs.PlateCarree())
plt.title('PlateCarree')
iplt.contourf(self.cube)
ax.coastlines()
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5,
globe=_DEFAULT_GLOBE))
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(313.01998901, 391.11999512, -22.48999977, 24.80999947))
@tests.skip_data
@tests.skip_plot
class TestLowLevel(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.global_pp()
self.few = 4
self.few_levels = list(range(280, 300, 5))
self.many_levels = np.linspace(
self.cube.data.min(), self.cube.data.max(), 40)
def test_simple(self):
iplt.contour(self.cube)
self.check_graphic()
def test_params(self):
iplt.contourf(self.cube, self.few)
self.check_graphic()
iplt.contourf(self.cube, self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, self.many_levels)
self.check_graphic()
def test_keywords(self):
iplt.contourf(self.cube, levels=self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, levels=self.many_levels, alpha=0.5)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestBoundedCube(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.global_pp()
# Add some bounds to this data (this will actually make the bounds
# invalid as they will straddle the north pole and overlap on the
# dateline, but that doesn't matter for this test.)
self.cube.coord('latitude').guess_bounds()
self.cube.coord('longitude').guess_bounds()
def test_pcolormesh(self):
# pcolormesh can only be drawn in native coordinates (or more
# specifically, in coordinates that don't wrap).
plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.outline(self.cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.PlateCarree())
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
[0., 360., -89.99995422, 89.99998474])
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(
self.cube, mode=iris.coords.BOUND_MODE),
[-1.875046, 358.124954, -91.24995422, 91.24998474])
@tests.skip_data
@tests.skip_plot
class TestLimitedAreaCube(tests.GraphicsTest):
def setUp(self):
cube_path = tests.get_data_path(('PP', 'aPProt1', 'rotated.pp'))
self.cube = iris.load_cube(cube_path)[::20, ::20]
self.cube.coord('grid_latitude').guess_bounds()
self.cube.coord('grid_longitude').guess_bounds()
def test_pcolormesh(self):
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.pcolormesh(self.cube, facecolors='none', edgecolors='blue')
# the result is a graphic which has coloured edges. This is a mpl bug,
# see https://github.com/matplotlib/matplotlib/issues/1302
self.check_graphic()
def test_outline(self):
iplt.outline(self.cube)
self.check_graphic()
def test_scatter(self):
iplt.points(self.cube)
plt.gca().coastlines()
self.check_graphic()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
FourthLion/pydatasentry | bin/sentry.py | 1 | 4559 | #!/usr/bin/env python
"""
sentry.py allows instrumenting a python/pandas program with no
modifications to the program itself. Note that only python 3 is supported.
::
sentry.py help
sentry.py init <sentry-conf.py>
sentry.py example <filename.py>
sentry.py [run|commit] [-c <sentry-conf.py>] <python-program-to-be-instrumented>"
run and commit are almost the same. The latter suggests a final
run. Only committed runs are stored/uploaded.
"""
import pydatasentry
import os, sys
import imp
import shutil
from importlib.machinery import SourceFileLoader
def load_program():
"""
Load the user's command line
"""
path = sys.argv[1]
with open(path) as f:
code = compile(f.read(), path, 'exec')
ldict = locals()
exec(code, globals(), ldict)
def load_configuration(conf):
if conf is None:
return {}
conf = os.path.abspath(conf)
if not os.path.exists(conf):
print("Configuration file not present:", conf)
sys.exit()
print("Configuration path", conf)
mod = SourceFileLoader("module.name", conf).load_module()
return mod.get_config()
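# A hypothetical minimal sentry-conf.py, inferred only from how the returned
# dict is used in main() below: the module must expose get_config() returning
# a dict, and a 'spec' sub-dict is created if missing.
#
#     def get_config():
#         return {"spec": {}}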
def sentry_help():
print("sentry: Transparently instrument pandas code")
print("sentry.py help")
print('sentry.py init <sentry-conf.py>')
print('sentry.py example <basic_ols.py>')
print('sentry.py run [-c|--config <sentry-conf.py>] <python-program-to-be-instrumented>')
def initialize(conf):
"""
Initialize a sentry configuration file
:param conf: sentry configuration file
"""
if os.path.exists(conf):
print("File already exists. Please remove first:", conf)
sys.exit()
rootdir = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
template = os.path.realpath(os.path.join(rootdir,
"share",
"sentry-conf.py.template"))
shutil.copyfile(template, conf)
print("Updated", conf)
def example(path):
"""
Initialize a sentry configuration file
:param conf: sentry configuration file
"""
if os.path.exists(path):
print("File already exists. Please remove first:", path)
sys.exit()
rootdir = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
template = os.path.realpath(os.path.join(rootdir,
"share",
"basic_ols.py.template"))
shutil.copyfile(template, path)
print("Updated", path)
def main():
offset = 1
conf=None
# Check for help...
if len(sys.argv) == 1 or sys.argv[1] in ["help"]:
sentry_help()
sys.exit()
cmd = sys.argv[0]
sys.argv = sys.argv[1:]
if sys.argv[0] in ["init"]:
if len(sys.argv) < 2:
print("Missing filename argument")
sentry_help()
sys.exit()
initialize(conf=sys.argv[1])
sys.exit()
if sys.argv[0] in ["example"]:
if len(sys.argv) < 2:
print("Missing filename argument")
sentry_help()
sys.exit()
example(path=sys.argv[1])
if sys.argv[0] in ["run", "commit"]:
runcmd = sys.argv[0]
if len(sys.argv) < 2:
print("Missing arguments")
sentry_help()
sys.exit()
# Handle the configuration option...
sys.argv = sys.argv[1:]
print("Before config", sys.argv)
if sys.argv[0] in ["-c", "--conf"]:
if len(sys.argv) < 3:
print("Missing configuration file")
sentry_help()
sys.exit()
conf = sys.argv[1]
config = load_configuration(conf)
sys.argv = sys.argv[2:]
else:
config = {}
if 'spec' not in config:
config['spec'] = {}
config['spec']['run'] = runcmd
if sys.argv[0] in ["-m", "--message"]:
if len(sys.argv) < 3:
print("Missing configuration file")
sentry_help()
sys.exit()
message = sys.argv[1]
config['spec']['message'] = message
sys.argv = sys.argv[2:]
print("Found config", config)
pydatasentry.initialize(config)
# Now load the program...
sys.argv.insert(0, cmd)
load_program()
if __name__ == "__main__":
main()
| mit |
mattgiguere/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/tree/export.py | 12 | 16020 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# License: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
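# Illustrative note (not part of the original module): _color_brew(3), for
# example, returns three [R, G, B] lists whose hues are spaced 120 degrees
# apart; export_graphviz below uses these as per-class fill colors when
# filled=True.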
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
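            # ranks maps depth -> node ids so that, when leaves_parallel is
            # set, one '{rank=same ...}' line per level is written at the end.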
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
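                    # (rotate - .5) * -2 is +1 when rotate is False and -1 when
                    # it is True, flipping the label angles for left-to-right layouts.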
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
astrilet/CARTAvis | carta/scriptedClient/tests/ImageUtil.py | 9 | 7066 | # Image related functions are detailed here.
import os
import time
import cv2
from cv2 import cv
import numpy as np
import pyautogui
import Image
from matplotlib import pyplot as plt
# Returns a cropped image of a specific location from the desktop
# screenshot. Mainly used to grab textboxes for later image
# analysis, e.g. reading textbox values.
def get_croppedImage(cropImage, matchCoords, saveImageName):
# Crop the image according to the matching coordinates and save image
img = cv2.imread( cropImage )
MPx, MPx2, MPy, MPy2 = matchCoords
# Crop and save the image
cropped = img[ MPy:MPy2, MPx:MPx2 ]
cv2.imwrite( saveImageName, cropped )
# Allow image to be saved before continuing
time.sleep(5)
return cropped
# Locate the needle image from the current desktop screen.
# First take a screenshot of the desktop, then perform image
# matching and return the bounding-box coordinates of the match.
def locateOnScreen(needleImg):
# Take a screenshot image of the desktop and save it
img = pyautogui.screenshot("desktop.png")
# Allow image to be saved prior to continuing
time.sleep(5)
# Get the matching coordinates
matchCoords = get_match_coordinates( needleImg, "desktop.png")
return matchCoords
# Locates a needle image on the screen. First take a screenshot of the
# desktop, then perform image matching and returns the (x, y)
# coordinates of the center position where the image has been found
def locateCenterOnScreen(needleImg):
# Take a screenshot image of the desktop and save it
img = pyautogui.screenshot( "desktop.png")
# Allow image to be saved prior to continuing
time.sleep(5)
# Get the matching coordinates
matchCoords = get_match_coordinates( needleImg, "desktop.png")
# If there are no matching coordinates, return nothing
if matchCoords == None:
return None
else:
coords = locateCenter( matchCoords )
return coords
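# Example usage sketch (the image file name below is hypothetical):
#   center = locateCenterOnScreen("histogram_button.png")
#   if center is not None:
#       pyautogui.click(center[0], center[1])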
# Perform image matching to determine the location of the needle image
# and return the coordinates of the matching location
def get_match_coordinates(needleImg, haystackImg):
# Specify the method for image matching
method = cv.CV_TM_SQDIFF_NORMED
imageName = needleImg
# Allow open cv to read the needle and haystack images
# We want to find the needle image in the haystack image
needleImg = cv2.imread( needleImg )
haystackImg = cv2.imread( haystackImg )
# Perform image matching and save result
result = cv2.matchTemplate( needleImg, haystackImg, method )
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
#print imageName, minVal
# Set a threshold for image match accuracy
# Image match will otherwise always return the "best match"
if minVal > 0.03: #0.05
matchCoords = None
else:
# minLoc variable stores the upper left corner coordinates
# where the needle image was found in the haystack image
MPx, MPy = minLoc
# Find the rectangular region where the needle image matches the
# haystack image and return coordinates of the match
trows, tcols = needleImg.shape[:2]
matchCoords = (MPx, MPx+tcols, MPy, MPy+trows)
# Uncomment if you want to see rectangular region
# Draw a rectangle around match region to check that the match region is correct
#cv2.rectangle(haystackImg, (MPx, MPy),(MPx+tcols, MPy+trows), (0,0,225),1)
# Save the resulting image as a png image
#cv2.imwrite( 'result.png', haystackImg )
# print matchCoords
return matchCoords
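# The tuple returned above is (left, right, top, bottom) in haystack pixel
# coordinates; locateCenter() below averages each pair to get the match center.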
# Find the center of the coordinates
def locateCenter(coords):
MPx, MPx2, MPy, MPy2 = coords
# Get the center location of the image match
xCoord = (MPx + MPx2)/2
yCoord = (MPy + MPy2)/2
return ( xCoord, yCoord )
# Find all instances of an image on the desktop and return the coordinates of all instances
def locateAllOnScreen(needleImg):
# Take a screenshot image of the desktop and save it
img = pyautogui.screenshot( "desktop.png")
# Allow image to be saved prior to continuing
time.sleep(5)
# Allow opencv2 to read both images
haystackImg = cv2.imread( "desktop.png")
grayImg = cv2.cvtColor( haystackImg, cv2.COLOR_BGR2GRAY)
needleImg = cv2.imread( needleImg ,0)
width , height = needleImg.shape[::-1]
# Use thresholding to find multiple matches
res = cv2.matchTemplate( grayImg, needleImg, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
matchCoords = np.where( res >= threshold)
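    # np.where on a 2D array returns (row_indices, col_indices), i.e. (y, x);
    # callers that draw boxes must zip them reversed, as in the example below.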
#Uncomment to view the rectangular match regions
#for pt in zip(*matchCoords[::-1]):
# cv2.rectangle( haystackImg, pt, (pt[0] + width, pt[1] + height), (0,0,255), 2)
# Save the resulting image as a png image
#cv2.imwrite('result.png', haystackImg )
return matchCoords
# Perform image preprocessing prior to image recognition. The image will
# be geometrically corrected, denoised, light-contrasted, and resolution corrected.
# This is to prevent the python OCR wrapper from incorrectly reading the image.
def image_preprocessing(imageName):
# Open the screenshot png image as a grayscale image
img = cv2.imread( imageName, 0 )
# Record the dimensions of the image
height, width = img.shape
# Denoise the image
img = cv2.fastNlMeansDenoising( img, 10, 10, 7, 21 )
    # Resize the image so that its height becomes 100 pixels.
# Note that we resize images while maintaining the aspect ratio.
if float( height ) != 100:
baseheight = 100
        # Determine the scale factor from the original height to the new base height
hpercent = ( baseheight / float( height ) )
        # Scale the original width by the same factor to preserve the aspect ratio
wsize = int( (float(width * float(hpercent))) )
# Resize the image based on the basewidth and new height
img = cv2.resize(img, (int(wsize), int(baseheight)))
# Perform binarization of the image (covert to black and white)
ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# Save and replace the old image with the new image
cv2.imwrite( imageName, img)
# Now sharpen the image and improve resolution with PIL
im = Image.open( imageName )
# Save the image with higher resolution
im.save(imageName, dpi=(600,600))
# Wait for image to save
time.sleep(10)
# Enlarge the image to a specific height and width
# Does not maintain the aspect ratio of the image
def image_enlarge(imageName, newHeight, newWidth, newImage):
# Open the screenshot png image
img = cv2.imread( imageName )
# Record the dimensions of the image
height, width, depth = img.shape
baseheight = newHeight
width = newWidth
# Resize the image based on the basewidth and new height
img = cv2.resize(img, (int(width), int(baseheight)))
# Save the image
cv2.imwrite( newImage, img) | gpl-2.0 |
simon-pepin/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. Such criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples that many features would be selected. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
draperjames/bokeh | examples/plotting/file/burtin.py | 10 | 4818 | from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
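# rad() below maps an antibiotic MIC value onto the annulus between
# inner_radius and outer_radius on a sqrt-log scale, so lower (more effective)
# MIC values get longer bars and the seven decades in the data stay readable.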
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
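# One big wedge per bacterium (the +1 leaves an empty slot), each split into
# sevenths so the three drug bars sit in alternating slices with gaps between.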
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="black",
background_fill_color="#f0e1d2", border_fill_color="#f0e1d2",
toolbar_sticky=False)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
0, 0, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(0, 0, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(0, 0, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(0, 0, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
p.text(0, radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(0, 0, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color),
text_font_size="9pt", text_align="left", text_baseline="middle")
output_file("burtin.html", title="burtin.py example")
show(p)
| bsd-3-clause |
adamrp/qiita | qiita_db/test/test_analysis.py | 1 | 27363 | from unittest import TestCase, main
from os import remove
from os.path import exists, join
from datetime import datetime
from shutil import move
from biom import load_table
import pandas as pd
from pandas.util.testing import assert_frame_equal
from qiita_core.util import qiita_test_checker
from qiita_core.qiita_settings import qiita_config
from qiita_db.analysis import Analysis, Collection
from qiita_db.job import Job
from qiita_db.user import User
from qiita_db.exceptions import (QiitaDBStatusError, QiitaDBError,
QiitaDBUnknownIDError)
from qiita_db.util import get_mountpoint, get_count
from qiita_db.study import Study, StudyPerson
from qiita_db.data import ProcessedData
from qiita_db.metadata_template import SampleTemplate
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestAnalysis(TestCase):
def setUp(self):
self.analysis = Analysis(1)
self.portal = qiita_config.portal
_, self.fp = get_mountpoint("analysis")[0]
self.biom_fp = join(self.fp, "1_analysis_18S.biom")
self.map_fp = join(self.fp, "1_analysis_mapping.txt")
def tearDown(self):
qiita_config.portal = self.portal
with open(self.biom_fp, 'w') as f:
f.write("")
with open(self.map_fp, 'w') as f:
f.write("")
fp = join(get_mountpoint('analysis')[0][1], 'testfile.txt')
if exists(fp):
remove(fp)
mp = get_mountpoint("processed_data")[0][1]
study2fp = join(mp, "2_2_study_1001_closed_reference_otu_table.biom")
if exists(study2fp):
move(study2fp,
join(mp, "2_study_1001_closed_reference_otu_table.biom"))
def test_lock_check(self):
for status in ["queued", "running", "public", "completed",
"error"]:
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis")
new.status = status
with self.assertRaises(QiitaDBStatusError):
new._lock_check()
def test_lock_check_ok(self):
self.analysis.status = "in_construction"
self.analysis._lock_check()
def test_status_setter_checks(self):
self.analysis.status = "public"
with self.assertRaises(QiitaDBStatusError):
self.analysis.status = "queued"
def test_get_by_status(self):
qiita_config.portal = 'QIITA'
self.assertEqual(Analysis.get_by_status('public'), set([]))
qiita_config.portal = 'EMP'
self.assertEqual(Analysis.get_by_status('public'), set([]))
self.analysis.status = "public"
qiita_config.portal = 'QIITA'
self.assertEqual(Analysis.get_by_status('public'), {1})
qiita_config.portal = 'EMP'
self.assertEqual(Analysis.get_by_status('public'), set([]))
def test_has_access_public(self):
self.conn_handler.execute("UPDATE qiita.analysis SET "
"analysis_status_id = 6")
qiita_config.portal = 'QIITA'
self.assertTrue(self.analysis.has_access(User("[email protected]")))
qiita_config.portal = 'EMP'
self.assertFalse(self.analysis.has_access(User("[email protected]")))
def test_has_access_shared(self):
self.assertTrue(self.analysis.has_access(User("[email protected]")))
def test_has_access_private(self):
self.assertTrue(self.analysis.has_access(User("[email protected]")))
def test_has_access_admin(self):
qiita_config.portal = 'QIITA'
self.assertTrue(self.analysis.has_access(User("[email protected]")))
qiita_config.portal = 'EMP'
with self.assertRaises(QiitaDBError):
Analysis(1).has_access(User("[email protected]"))
def test_has_access_no_access(self):
self.assertFalse(self.analysis.has_access(User("[email protected]")))
def test_create(self):
sql = "SELECT EXTRACT(EPOCH FROM NOW())"
time1 = float(self.conn_handler.execute_fetchall(sql)[0][0])
new_id = get_count("qiita.analysis") + 1
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis")
self.assertEqual(new.id, new_id)
sql = ("SELECT analysis_id, email, name, description, "
"analysis_status_id, pmid, EXTRACT(EPOCH FROM timestamp) "
"FROM qiita.analysis WHERE analysis_id = %s")
obs = self.conn_handler.execute_fetchall(sql, [new_id])
self.assertEqual(obs[0][:-1], [new_id, '[email protected]', 'newAnalysis',
'A New Analysis', 1, None])
self.assertTrue(time1 < float(obs[0][-1]))
# make sure portal is associated
obs = self.conn_handler.execute_fetchall(
"SELECT * from qiita.analysis_portal WHERE analysis_id = %s",
[new_id])
self.assertEqual(obs, [[new_id, 1]])
def test_create_nonqiita_portal(self):
new_id = get_count("qiita.analysis") + 1
qiita_config.portal = "EMP"
Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis")
# make sure portal is associated
obs = self.conn_handler.execute_fetchall(
"SELECT * from qiita.analysis_portal WHERE analysis_id = %s",
[new_id])
self.assertEqual(obs, [[new_id, 2], [new_id, 1]])
def test_create_parent(self):
sql = "SELECT EXTRACT(EPOCH FROM NOW())"
time1 = float(self.conn_handler.execute_fetchall(sql)[0][0])
new_id = get_count("qiita.analysis") + 1
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
self.assertEqual(new.id, new_id)
sql = ("SELECT analysis_id, email, name, description, "
"analysis_status_id, pmid, EXTRACT(EPOCH FROM timestamp) "
"FROM qiita.analysis WHERE analysis_id = %s")
obs = self.conn_handler.execute_fetchall(sql, [new_id])
self.assertEqual(obs[0][:-1], [new_id, '[email protected]', 'newAnalysis',
'A New Analysis', 1, None])
self.assertTrue(time1 < float(obs[0][-1]))
sql = "SELECT * FROM qiita.analysis_chain WHERE child_id = %s"
obs = self.conn_handler.execute_fetchall(sql, [new_id])
self.assertEqual(obs, [[1, new_id]])
def test_create_from_default(self):
new_id = get_count("qiita.analysis") + 1
owner = User("[email protected]")
new = Analysis.create(owner, "newAnalysis",
"A New Analysis", from_default=True)
self.assertEqual(new.id, new_id)
self.assertEqual(new.step, 3)
        # Make sure samples were transferred properly
sql = "SELECT * FROM qiita.analysis_sample WHERE analysis_id = %s"
obs = self.conn_handler.execute_fetchall(sql, [owner.default_analysis])
exp = []
self.assertEqual(obs, exp)
sql = "SELECT * FROM qiita.analysis_sample WHERE analysis_id = %s"
obs = self.conn_handler.execute_fetchall(sql, [new_id])
exp = [[new_id, 1, '1.SKD8.640184'],
[new_id, 1, '1.SKB7.640196'],
[new_id, 1, '1.SKM9.640192'],
[new_id, 1, '1.SKM4.640180']]
self.assertEqual(obs, exp)
def test_exists(self):
qiita_config.portal = 'QIITA'
self.assertTrue(Analysis.exists(1))
new_id = get_count("qiita.analysis") + 1
self.assertFalse(Analysis.exists(new_id))
qiita_config.portal = 'EMP'
self.assertFalse(Analysis.exists(1))
new_id = get_count("qiita.analysis") + 1
self.assertFalse(Analysis.exists(new_id))
def test_delete(self):
# successful delete
total_analyses = get_count("qiita.analysis")
Analysis.delete(1)
self.assertEqual(total_analyses - 1, get_count("qiita.analysis"))
# no possible to delete
with self.assertRaises(QiitaDBUnknownIDError):
Analysis.delete(total_analyses + 1)
def test_retrieve_owner(self):
self.assertEqual(self.analysis.owner, "[email protected]")
def test_retrieve_name(self):
self.assertEqual(self.analysis.name, "SomeAnalysis")
def test_retrieve_description(self):
self.assertEqual(self.analysis.description, "A test analysis")
def test_set_description(self):
self.analysis.description = "New description"
self.assertEqual(self.analysis.description, "New description")
def test_retrieve_samples(self):
exp = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']}
self.assertEqual(self.analysis.samples, exp)
def test_retrieve_dropped_samples(self):
# Create and populate second study to do test with
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
metadata_dict = {
'SKB8.640193': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': True,
'dna_extracted': True,
'sample_type': 'type1',
'required_sample_info_status': 'received',
'collection_timestamp':
datetime(2014, 5, 29, 12, 24, 51),
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'str_column': 'Value for sample 1',
'latitude': 42.42,
'longitude': 41.41,
'taxon_id': 9606,
'scientific_name': 'homo sapiens'},
'SKD8.640184': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': True,
'dna_extracted': True,
'sample_type': 'type1',
'required_sample_info_status': 'received',
'collection_timestamp':
datetime(2014, 5, 29, 12, 24, 51),
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 2',
'str_column': 'Value for sample 2',
'latitude': 4.2,
'longitude': 1.1,
'taxon_id': 9606,
'scientific_name': 'homo sapiens'},
'SKB7.640196': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': True,
'dna_extracted': True,
'sample_type': 'type1',
'required_sample_info_status': 'received',
'collection_timestamp':
datetime(2014, 5, 29, 12, 24, 51),
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 3',
'str_column': 'Value for sample 3',
'latitude': 4.8,
'longitude': 4.41,
'taxon_id': 9606,
'scientific_name': 'homo sapiens'},
}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
Study.create(User("[email protected]"), "Test study 2", [1], info)
SampleTemplate.create(metadata, Study(2))
mp = get_mountpoint("processed_data")[0][1]
study_fp = join(mp, "2_study_1001_closed_reference_otu_table.biom")
ProcessedData.create("processed_params_uclust", 1, [(study_fp, 6)],
study=Study(2), data_type="16S")
self.conn_handler.execute(
"INSERT INTO qiita.analysis_sample (analysis_id, "
"processed_data_id, sample_id) VALUES "
"(1,2,'2.SKB8.640193'), (1,2,'2.SKD8.640184'), "
"(1,2,'2.SKB7.640196')")
samples = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
2: ['2.SKB8.640193', '2.SKD8.640184']}
self.analysis._build_biom_tables(samples, 10000)
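        # Samples that belong to the analysis but were left out of the tables
        # built above should be reported as dropped.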
exp = {1: {'1.SKM4.640180', '1.SKM9.640192'},
2: {'2.SKB7.640196'}}
self.assertEqual(self.analysis.dropped_samples, exp)
def test_empty_analysis(self):
analysis = Analysis(2)
# These should be empty as the analysis hasn't started
self.assertEqual(analysis.biom_tables, {})
self.assertEqual(analysis.dropped_samples, {})
def test_retrieve_portal(self):
self.assertEqual(self.analysis._portals, ["QIITA"])
def test_retrieve_data_types(self):
exp = ['18S']
self.assertEqual(self.analysis.data_types, exp)
def test_retrieve_shared_with(self):
self.assertEqual(self.analysis.shared_with, ["[email protected]"])
def test_retrieve_biom_tables(self):
exp = {"18S": join(self.fp, "1_analysis_18S.biom")}
self.assertEqual(self.analysis.biom_tables, exp)
def test_all_associated_filepaths(self):
exp = {10, 11, 12, 13}
self.assertEqual(self.analysis.all_associated_filepath_ids, exp)
def test_retrieve_biom_tables_empty(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
self.assertEqual(new.biom_tables, {})
def test_set_step(self):
new_id = get_count("qiita.analysis") + 1
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
new.step = 2
sql = "SELECT * FROM qiita.analysis_workflow WHERE analysis_id = %s"
obs = self.conn_handler.execute_fetchall(sql, [new_id])
self.assertEqual(obs, [[new_id, 2]])
def test_set_step_twice(self):
new_id = get_count("qiita.analysis") + 1
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
new.step = 2
new.step = 4
sql = "SELECT * FROM qiita.analysis_workflow WHERE analysis_id = %s"
obs = self.conn_handler.execute_fetchall(sql, [new_id])
self.assertEqual(obs, [[new_id, 4]])
def test_retrieve_step(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
new.step = 2
self.assertEqual(new.step, 2)
def test_retrieve_step_new(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
with self.assertRaises(ValueError):
new.step
def test_retrieve_step_locked(self):
self.analysis.status = "public"
with self.assertRaises(QiitaDBStatusError):
self.analysis.step = 3
def test_retrieve_jobs(self):
self.assertEqual(self.analysis.jobs, [1, 2])
def test_retrieve_jobs_none(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
self.assertEqual(new.jobs, [])
def test_retrieve_pmid(self):
self.assertEqual(self.analysis.pmid, "121112")
def test_retrieve_pmid_none(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
self.assertEqual(new.pmid, None)
def test_set_pmid(self):
self.analysis.pmid = "11211221212213"
self.assertEqual(self.analysis.pmid, "11211221212213")
def test_retrieve_mapping_file(self):
exp = join(self.fp, "1_analysis_mapping.txt")
obs = self.analysis.mapping_file
self.assertEqual(obs, exp)
self.assertTrue(exists(exp))
def test_retrieve_mapping_file_none(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis", Analysis(1))
obs = new.mapping_file
self.assertEqual(obs, None)
# def test_get_parent(self):
# raise NotImplementedError()
# def test_get_children(self):
# raise NotImplementedError()
def test_summary_data(self):
obs = self.analysis.summary_data()
exp = {'studies': 1,
'processed_data': 1,
'samples': 5}
self.assertEqual(obs, exp)
def test_add_samples(self):
new = Analysis.create(User("[email protected]"), "newAnalysis",
"A New Analysis")
new.add_samples({1: ['1.SKB8.640193', '1.SKD5.640186']})
obs = new.samples
self.assertEqual(obs.keys(), [1])
self.assertItemsEqual(obs[1], ['1.SKB8.640193', '1.SKD5.640186'])
def test_remove_samples_both(self):
self.analysis.remove_samples(proc_data=(1, ),
samples=('1.SKB8.640193', ))
exp = {1: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180']}
self.assertEqual(self.analysis.samples, exp)
def test_remove_samples_samples(self):
self.analysis.remove_samples(samples=('1.SKD8.640184', ))
exp = {1: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180']}
self.assertEqual(self.analysis.samples, exp)
def test_remove_samples_processed_data(self):
self.analysis.remove_samples(proc_data=(1, ))
exp = {}
self.assertEqual(self.analysis.samples, exp)
def test_share(self):
self.analysis.share(User("[email protected]"))
self.assertEqual(self.analysis.shared_with, ["[email protected]",
"[email protected]"])
def test_unshare(self):
self.analysis.unshare(User("[email protected]"))
self.assertEqual(self.analysis.shared_with, [])
def test_get_samples(self):
obs = self.analysis._get_samples()
exp = {1: ['1.SKB7.640196', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM4.640180', '1.SKM9.640192']}
self.assertEqual(obs, exp)
def test_build_mapping_file(self):
new_id = get_count('qiita.filepath') + 1
samples = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
self.analysis._build_mapping_file(samples)
obs = self.analysis.mapping_file
self.assertEqual(obs, self.map_fp)
base_dir = get_mountpoint('analysis')[0][1]
obs = pd.read_csv(obs, sep='\t', infer_datetime_format=True,
parse_dates=True, index_col=False, comment='\t')
exp = pd.read_csv(join(base_dir, '1_analysis_mapping_exp.txt'),
sep='\t', infer_datetime_format=True,
parse_dates=True, index_col=False, comment='\t')
assert_frame_equal(obs, exp)
sql = """SELECT * FROM qiita.filepath
WHERE filepath=%s ORDER BY filepath_id"""
obs = self.conn_handler.execute_fetchall(
sql, ("%d_analysis_mapping.txt" % self.analysis.id,))
exp = [[13, '1_analysis_mapping.txt', 9, '852952723', 1, 1],
[new_id, '1_analysis_mapping.txt', 9, '1606265094', 1, 1]]
self.assertEqual(obs, exp)
sql = """SELECT * FROM qiita.analysis_filepath
WHERE analysis_id=%s ORDER BY filepath_id"""
obs = self.conn_handler.execute_fetchall(sql, (self.analysis.id,))
        exp = [[1L, 14L, 2L], [1L, 15L, None], [1L, new_id, None]]
        self.assertEqual(obs, exp)
def test_build_mapping_file_duplicate_samples(self):
samples = {1: ['1.SKB8.640193', '1.SKB8.640193', '1.SKD8.640184']}
with self.assertRaises(QiitaDBError):
self.analysis._build_mapping_file(samples)
def test_build_biom_tables(self):
new_id = get_count('qiita.filepath') + 1
samples = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
self.analysis._build_biom_tables(samples, 100)
obs = self.analysis.biom_tables
self.assertEqual(obs, {'18S': self.biom_fp})
table = load_table(self.biom_fp)
obs = set(table.ids(axis='sample'))
exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
self.assertEqual(obs, exp)
obs = table.metadata('1.SKB8.640193')
exp = {'Study':
'Identification of the Microbiomes for Cannabis Soils',
'Processed_id': 1}
self.assertEqual(obs, exp)
sql = """SELECT EXISTS(SELECT * FROM qiita.filepath
WHERE filepath_id=%s)"""
obs = self.conn_handler.execute_fetchone(sql, (new_id,))[0]
self.assertTrue(obs)
sql = """SELECT * FROM qiita.analysis_filepath
WHERE analysis_id=%s ORDER BY filepath_id"""
obs = self.conn_handler.execute_fetchall(sql, (self.analysis.id,))
        exp = [[1L, 14L, 2L], [1L, 15L, None], [1L, new_id, None]]
        self.assertEqual(obs, exp)
def test_build_files(self):
self.analysis.build_files()
def test_build_files_raises_type_error(self):
with self.assertRaises(TypeError):
self.analysis.build_files('string')
with self.assertRaises(TypeError):
self.analysis.build_files(100.5)
def test_build_files_raises_value_error(self):
with self.assertRaises(ValueError):
self.analysis.build_files(0)
with self.assertRaises(ValueError):
self.analysis.build_files(-10)
def test_add_file(self):
new_id = get_count('qiita.filepath') + 1
fp = join(get_mountpoint('analysis')[0][1], 'testfile.txt')
with open(fp, 'w') as f:
f.write('testfile!')
self.analysis._add_file('testfile.txt', 'plain_text', '18S')
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.filepath WHERE filepath_id = %s',
(new_id,))
exp = [[new_id, 'testfile.txt', 9, '3675007573', 1, 1]]
self.assertEqual(obs, exp)
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.analysis_filepath WHERE filepath_id = %s',
(new_id,))
exp = [[1, new_id, 2]]
self.assertEqual(obs, exp)
@qiita_test_checker()
class TestCollection(TestCase):
def setUp(self):
self.collection = Collection(1)
def test_create(self):
Collection.create(User('[email protected]'), 'TestCollection2', 'Some desc')
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.collection WHERE collection_id = 2')
exp = [[2, '[email protected]', 'TestCollection2', 'Some desc', 1]]
self.assertEqual(obs, exp)
def test_create_no_desc(self):
Collection.create(User('[email protected]'), 'Test Collection2')
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.collection WHERE collection_id = 2')
exp = [[2, '[email protected]', 'Test Collection2', None, 1]]
self.assertEqual(obs, exp)
def test_delete(self):
Collection.delete(1)
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.collection')
exp = []
self.assertEqual(obs, exp)
def test_delete_public(self):
self.collection.status = 'public'
with self.assertRaises(QiitaDBStatusError):
Collection.delete(1)
obs = self.conn_handler.execute_fetchall(
'SELECT * FROM qiita.collection')
exp = [[1, '[email protected]', 'TEST_COLLECTION',
'collection for testing purposes', 2]]
self.assertEqual(obs, exp)
def test_retrieve_name(self):
obs = self.collection.name
exp = "TEST_COLLECTION"
self.assertEqual(obs, exp)
def test_set_name(self):
self.collection.name = "NeW NaMe 123"
self.assertEqual(self.collection.name, "NeW NaMe 123")
def test_set_name_public(self):
self.collection.status = "public"
with self.assertRaises(QiitaDBStatusError):
self.collection.name = "FAILBOAT"
def test_retrieve_desc(self):
obs = self.collection.description
exp = "collection for testing purposes"
self.assertEqual(obs, exp)
def test_set_desc(self):
self.collection.description = "NeW DeSc 123"
self.assertEqual(self.collection.description, "NeW DeSc 123")
def test_set_desc_public(self):
self.collection.status = "public"
with self.assertRaises(QiitaDBStatusError):
self.collection.description = "FAILBOAT"
def test_retrieve_owner(self):
obs = self.collection.owner
exp = "[email protected]"
self.assertEqual(obs, exp)
def test_retrieve_analyses(self):
obs = self.collection.analyses
exp = [1]
self.assertEqual(obs, exp)
def test_retrieve_highlights(self):
obs = self.collection.highlights
exp = [1]
self.assertEqual(obs, exp)
def test_retrieve_shared_with(self):
obs = self.collection.shared_with
exp = ["[email protected]"]
self.assertEqual(obs, exp)
def test_add_analysis(self):
self.collection.add_analysis(Analysis(2))
obs = self.collection.analyses
exp = [1, 2]
self.assertEqual(obs, exp)
def test_remove_analysis(self):
self.collection.remove_analysis(Analysis(1))
obs = self.collection.analyses
exp = []
self.assertEqual(obs, exp)
def test_highlight_job(self):
self.collection.highlight_job(Job(2))
obs = self.collection.highlights
exp = [1, 2]
self.assertEqual(obs, exp)
def test_remove_highlight(self):
self.collection.remove_highlight(Job(1))
obs = self.collection.highlights
exp = []
self.assertEqual(obs, exp)
def test_share(self):
self.collection.share(User("[email protected]"))
obs = self.collection.shared_with
exp = ["[email protected]", "[email protected]"]
self.assertEqual(obs, exp)
def test_unshare(self):
self.collection.unshare(User("[email protected]"))
obs = self.collection.shared_with
exp = []
self.assertEqual(obs, exp)
if __name__ == "__main__":
main()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
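# Minimal usage sketch of the estimator under test (shapes are illustrative):
#   lshf = LSHForest(n_estimators=10, n_candidates=50).fit(X)
#   distances, indices = lshf.kneighbors(X[:1], n_neighbors=5)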
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracy should not decrease as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracy should not decrease as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slighltly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
Upward-Spiral-Science/team1 | code/Imaging Cortical Layers.py | 1 | 1976 | from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
import urllib2
import scipy.stats as stats
np.set_printoptions(precision=3, suppress=True)
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")[1:] # don't want first row (labels)
# chopping data based on thresholds on x and y coordinates
x_bounds = (409, 3529)
y_bounds = (1564, 3124)
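# Keep only rows whose bin coordinates fall inside the x/y bounds and whose
# unmasked voxel count (row[3]) is nonzero, so the later density division is safe.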
def check_in_bounds(row, x_bounds, y_bounds):
if row[0] < x_bounds[0] or row[0] > x_bounds[1]:
return False
if row[1] < y_bounds[0] or row[1] > y_bounds[1]:
return False
if row[3] == 0:
return False
return True
indices_in_bound, = np.where(np.apply_along_axis(check_in_bounds, 1, csv,
x_bounds, y_bounds))
data_thresholded = csv[indices_in_bound]
n = data_thresholded.shape[0]
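# Convert raw synapse counts to a density: synapses per unmasked voxel,
# rescaled by the constant 64**3 so values are on a comparable scale across bins.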
def synapses_over_unmasked(row):
s = (row[4]/row[3])*(64**3)
return [row[0], row[1], row[2], s]
syn_unmasked = np.apply_along_axis(synapses_over_unmasked, 1, data_thresholded)
syn_normalized = syn_unmasked
# Looking at images across y, and of the layers in the y-direction
#########################################################################################
from image_builder import get_image
xs = np.unique(data_thresholded[:,0])
ys = np.unique(data_thresholded[:,1])
# Layer across y
get_image((0,1),(0,len(ys)-1),xs,ys, "across_y")
print len(ys)-1
# Each y-layer defined by bounds of local minima in total syn density at each y
y_bounds = [(1564,1837), (1837,2071), (2071,2305), (2305,2539), (2539,3124)]
for bounds in y_bounds:
    y_lower = np.where(ys == bounds[0])[0][0]
    y_upper = np.where(ys == bounds[1])[0][0]
    print "y-layer", bounds, "-> index bounds:", y_lower, y_upper
i = get_image((0,1),(y_lower,y_upper),xs,ys,str(bounds[0])+"_"+str(bounds[1])) | apache-2.0 |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/figure.py | 69 | 38331 | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
top level container for all plot elements
"""
import numpy as np
import time
import artist
from artist import Artist
from axes import Axes, SubplotBase, subplot_class_factory
from cbook import flatten, allequal, Stack, iterable, dedent
import _image
import colorbar as cbar
from image import FigureImage
from matplotlib import rcParams
from patches import Rectangle
from text import Text, _process_text_args
from legend import Legend
from transforms import Affine2D, Bbox, BboxTransformTo, TransformedBbox
from projections import projection_factory, get_projection_names, \
get_projection_class
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
import matplotlib.cbook as cbook
class SubplotParams:
"""
A class to hold the parameters for a subplot
"""
def __init__(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
All dimensions are fraction of the figure width or height.
All values default to their rc params
The following attributes are available
*left* = 0.125
the left side of the subplots of the figure
*right* = 0.9
the right side of the subplots of the figure
*bottom* = 0.1
the bottom of the subplots of the figure
*top* = 0.9
the top of the subplots of the figure
*wspace* = 0.2
the amount of width reserved for blank space between subplots
*hspace* = 0.2
the amount of height reserved for white space between subplots
*validate*
make sure the params are in a legal state (*left*<*right*, etc)
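
        Example (building a figure with a wider left margin and extra
        horizontal space between subplots; uses only names defined in this
        module)::

            from matplotlib.figure import Figure, SubplotParams
            fig = Figure(subplotpars=SubplotParams(left=0.2, wspace=0.4))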
"""
self.validate = True
self.update(left, bottom, right, top, wspace, hspace)
def update(self,left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc
"""
thisleft = getattr(self, 'left', None)
thisright = getattr(self, 'right', None)
thistop = getattr(self, 'top', None)
thisbottom = getattr(self, 'bottom', None)
thiswspace = getattr(self, 'wspace', None)
thishspace = getattr(self, 'hspace', None)
self._update_this('left', left)
self._update_this('right', right)
self._update_this('bottom', bottom)
self._update_this('top', top)
self._update_this('wspace', wspace)
self._update_this('hspace', hspace)
def reset():
self.left = thisleft
self.right = thisright
self.top = thistop
self.bottom = thisbottom
self.wspace = thiswspace
self.hspace = thishspace
if self.validate:
if self.left>=self.right:
reset()
raise ValueError('left cannot be >= right')
if self.bottom>=self.top:
reset()
raise ValueError('bottom cannot be >= top')
def _update_this(self, s, val):
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
class Figure(Artist):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
The figure patch is drawn by a the attribute
*patch*
a :class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
for multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __init__(self,
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
linewidth = 1.0, # the default linewidth of the frame
frameon = True, # whether or not to draw the figure frame
subplotpars = None, # default to rc
):
"""
*figsize*
w,h tuple in inches
*dpi*
dots per inch
*facecolor*
the figure patch facecolor; defaults to rc ``figure.facecolor``
*edgecolor*
the figure patch edge color; defaults to rc ``figure.edgecolor``
*linewidth*
the figure patch edge linewidth; the default linewidth of the frame
*frameon*
if False, suppress drawing the figure frame
*subplotpars*
a :class:`SubplotParams` instance, defaults to rc
"""
Artist.__init__(self)
self.callbacks = cbook.CallbackRegistry(('dpi_changed', ))
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None: facecolor = rcParams['figure.facecolor']
if edgecolor is None: edgecolor = rcParams['figure.edgecolor']
self.dpi_scale_trans = Affine2D()
self.dpi = dpi
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.frameon = frameon
self.transFigure = BboxTransformTo(self.bbox)
# the figurePatch name is deprecated
self.patch = self.figurePatch = Rectangle(
xy=(0,0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
linewidth=linewidth,
)
self._set_artist_props(self.patch)
self._hold = rcParams['axes.hold']
self.canvas = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self._axstack = Stack() # maintain the current axes
self.axes = []
self.clf()
self._cachedRenderer = None
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi):
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi, dpi)
self.callbacks.process('dpi_changed', self)
dpi = property(_get_dpi, _set_dpi)
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared xaxes where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
*bottom*
the bottom of the subplots for :meth:`subplots_adjust`
*rotation*
the rotation of the xtick labels
*ha*
the horizontal alignment of the xticklabels
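
        Example (here *dates* and *values* are assumed to be existing
        sequences of datetimes and floats)::

            ax = fig.add_subplot(111)
            ax.plot_date(dates, values)
            fig.autofmt_xdate()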
"""
allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax in self.axes])
        if len(self.axes)==1:
            ax = self.axes[0]
            for label in ax.get_xticklabels():
                label.set_ha(ha)
                label.set_rotation(rotation)
else:
if allsubplots:
for ax in self.get_axes():
if ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
if allsubplots:
self.subplots_adjust(bottom=bottom)
def get_children(self):
'get a list of artists contained in the figure'
children = [self.patch]
children.extend(self.artists)
children.extend(self.axes)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.images)
children.extend(self.legends)
return children
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns True,{}
"""
if callable(self._contains): return self._contains(self,mouseevent)
#inside = mouseevent.x >= 0 and mouseevent.y >= 0
inside = self.bbox.contains(mouseevent.x,mouseevent.y)
return inside,{}
def get_window_extent(self, *args, **kwargs):
'get the figure bounding box in display space; kwargs are void'
return self.bbox
def suptitle(self, t, **kwargs):
"""
Add a centered title to the figure.
kwargs are :class:`matplotlib.text.Text` properties. Using figure
coordinates, the defaults are:
- *x* = 0.5
the x location of text in figure coords
- *y* = 0.98
the y location of the text in figure coords
- *horizontalalignment* = 'center'
the horizontal alignment of the text
- *verticalalignment* = 'top'
the vertical alignment of the text
A :class:`matplotlib.text.Text` instance is returned.
Example::
            fig.suptitle('this is the figure title', fontsize=12)
"""
x = kwargs.pop('x', 0.5)
y = kwargs.pop('y', 0.98)
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['horizontalalignment'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['verticalalignment'] = 'top'
t = self.text(x, y, t, **kwargs)
return t
def set_canvas(self, canvas):
"""
        Set the canvas that contains the figure
ACCEPTS: a FigureCanvas instance
"""
self.canvas = canvas
def hold(self, b=None):
"""
Set the hold state. If hold is None (default), toggle the
hold state. Else set the hold state to boolean value b.
Eg::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
"""
if b is None: self._hold = not self._hold
else: self._hold = b
def figimage(self, X,
xo=0,
yo=0,
alpha=1.0,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None):
"""
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= ==========================================================
Keyword Description
========= ==========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.cm.ColorMap` instance, eg cm.jet.
If None, default to the rc ``image.cmap`` value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either is
None, the min and max of the luminance values will be
used. Note if you pass a norm instance, the settings for
*vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is 1.0
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= ==========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
"""
if not self._hold: self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
return im
def set_figsize_inches(self, *args, **kwargs):
import warnings
warnings.warn('Use set_size_inches instead!', DeprecationWarning)
self.set_size_inches(*args, **kwargs)
def set_size_inches(self, *args, **kwargs):
"""
set_size_inches(w,h, forward=False)
Set the figure size in inches
Usage::
fig.set_size_inches(w,h) # OR
fig.set_size_inches((w,h) )
optional kwarg *forward=True* will cause the canvas size to be
automatically updated; eg you can resize the figure window
from the shell
WARNING: forward=True is broken on all backends except GTK*
and WX*
ACCEPTS: a w,h tuple with w,h in inches
"""
forward = kwargs.get('forward', False)
if len(args)==1:
w,h = args[0]
else:
w,h = args
dpival = self.dpi
self.bbox_inches.p1 = w, h
if forward:
dpival = self.dpi
canvasw = w*dpival
canvash = h*dpival
manager = getattr(self.canvas, 'manager', None)
if manager is not None:
manager.resize(int(canvasw), int(canvash))
def get_size_inches(self):
return self.bbox_inches.p1
def get_edgecolor(self):
'Get the edge color of the Figure rectangle'
return self.patch.get_edgecolor()
def get_facecolor(self):
'Get the face color of the Figure rectangle'
return self.patch.get_facecolor()
def get_figwidth(self):
'Return the figwidth as a float'
return self.bbox_inches.width
def get_figheight(self):
'Return the figheight as a float'
return self.bbox_inches.height
def get_dpi(self):
'Return the dpi as a float'
return self.dpi
def get_frameon(self):
'get the boolean indicating frameon'
return self.frameon
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_facecolor(color)
def set_dpi(self, val):
"""
Set the dots-per-inch of the figure
ACCEPTS: float
"""
self.dpi = val
def set_figwidth(self, val):
"""
Set the width of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.x1 = val
def set_figheight(self, val):
"""
Set the height of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.y1 = val
def set_frameon(self, b):
"""
Set whether the figure frame (background) is displayed or invisible
ACCEPTS: boolean
"""
self.frameon = b
def delaxes(self, a):
'remove a from the figure and update the current axes'
self.axes.remove(a)
self._axstack.remove(a)
keys = []
for key, thisax in self._seen.items():
if a==thisax: del self._seen[key]
for func in self._axobservers: func(self)
def _make_key(self, *args, **kwargs):
'make a hashable key out of args and kwargs'
def fixitems(items):
#items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
if iterable(v): v = tuple(v)
ret.append((k,v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if iterable(a): a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(kwargs.items())
return key
def add_axes(self, *args, **kwargs):
"""
        Add an axes with axes rect [*left*, *bottom*, *width*,
*height*] where all quantities are in fractions of figure
width and height. kwargs are legal
:class:`~matplotlib.axes.Axes` kwargs plus *projection* which
sets the projection type of the axes. (For backward
compatibility, ``polar=True`` may also be provided, which is
equivalent to ``projection='polar'``). Valid values for
*projection* are: %(list)s. Some of these projections support
additional kwargs, which may be provided to :meth:`add_axes`::
rect = l,b,w,h
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, axisbg='g')
fig.add_axes(rect, polar=True)
fig.add_axes(rect, projection='polar')
fig.add_axes(ax) # add an Axes instance
If the figure already has an axes with the same parameters,
then it will simply make that axes current and return it. If
you do not want this behavior, eg. you want to force the
creation of a new axes, you must use a unique set of args and
kwargs. The axes :attr:`~matplotlib.axes.Axes.label`
attribute has been exposed for this purpose. Eg., if you want
two axes that are otherwise identical to be added to the
figure, make sure you give them unique labels::
fig.add_axes(rect, label='axes1')
fig.add_axes(rect, label='axes2')
The :class:`~matplotlib.axes.Axes` instance will be returned.
The following kwargs are supported:
%(Axes)s
"""
key = self._make_key(*args, **kwargs)
if key in self._seen:
ax = self._seen[key]
self.sca(ax)
return ax
if not len(args): return
if isinstance(args[0], Axes):
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
ispolar = kwargs.pop('polar', False)
projection = kwargs.pop('projection', None)
if ispolar:
if projection is not None and projection != 'polar':
                    raise ValueError(
                        "polar=True, yet projection='%s'. "
                        "Only one of these arguments should be supplied." %
                        projection)
projection = 'polar'
a = projection_factory(projection, self, rect, **kwargs)
self.axes.append(a)
self._axstack.push(a)
self.sca(a)
self._seen[key] = a
return a
add_axes.__doc__ = dedent(add_axes.__doc__) % \
{'list': (", ".join(get_projection_names())),
'Axes': artist.kwdocd['Axes']}
def add_subplot(self, *args, **kwargs):
"""
Add a subplot. Examples:
fig.add_subplot(111)
fig.add_subplot(1,1,1) # equivalent but more general
fig.add_subplot(212, axisbg='r') # add subplot with red background
fig.add_subplot(111, polar=True) # add a polar subplot
fig.add_subplot(sub) # add Subplot instance sub
*kwargs* are legal :class:`!matplotlib.axes.Axes` kwargs plus
*projection*, which chooses a projection type for the axes.
(For backward compatibility, *polar=True* may also be
provided, which is equivalent to *projection='polar'*). Valid
values for *projection* are: %(list)s. Some of these projections
support additional *kwargs*, which may be provided to
:meth:`add_axes`.
The :class:`~matplotlib.axes.Axes` instance will be returned.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it.
The following kwargs are supported:
%(Axes)s
"""
kwargs = kwargs.copy()
if not len(args): return
if isinstance(args[0], SubplotBase):
a = args[0]
assert(a.get_figure() is self)
else:
ispolar = kwargs.pop('polar', False)
projection = kwargs.pop('projection', None)
if ispolar:
if projection is not None and projection != 'polar':
                    raise ValueError(
                        "polar=True, yet projection='%s'. "
                        "Only one of these arguments should be supplied." %
                        projection)
projection = 'polar'
projection_class = get_projection_class(projection)
key = self._make_key(*args, **kwargs)
if key in self._seen:
ax = self._seen[key]
if isinstance(ax, projection_class):
self.sca(ax)
return ax
else:
self.axes.remove(ax)
self._axstack.remove(ax)
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
self._seen[key] = a
self.axes.append(a)
self._axstack.push(a)
self.sca(a)
return a
add_subplot.__doc__ = dedent(add_subplot.__doc__) % {
'list': ", ".join(get_projection_names()),
'Axes': artist.kwdocd['Axes']}
def clf(self):
"""
Clear the figure
"""
self.suppressComposite = None
self.callbacks = cbook.CallbackRegistry(('dpi_changed', ))
for ax in tuple(self.axes): # Iterate over the copy.
ax.cla()
self.delaxes(ax) # removes ax from self.axes
toolbar = getattr(self.canvas, 'toolbar', None)
if toolbar is not None:
toolbar.update()
self._axstack.clear()
self._seen = {}
self.artists = []
self.lines = []
self.patches = []
self.texts=[]
self.images = []
self.legends = []
self._axobservers = []
def clear(self):
"""
Clear the figure -- synonym for fig.clf
"""
self.clf()
def draw(self, renderer):
"""
Render the figure using :class:`matplotlib.backend_bases.RendererBase` instance renderer
"""
# draw the figure bounding box, perhaps none for white figure
#print 'figure draw'
if not self.get_visible(): return
renderer.open_group('figure')
if self.frameon: self.patch.draw(renderer)
# todo: respect zorder
for p in self.patches: p.draw(renderer)
for l in self.lines: l.draw(renderer)
for a in self.artists: a.draw(renderer)
# override the renderer default if self.suppressComposite
# is not None
composite = renderer.option_image_nocomposite()
if self.suppressComposite is not None:
composite = self.suppressComposite
if len(self.images)<=1 or composite or not allequal([im.origin for im in self.images]):
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (_image.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag), im.ox, im.oy)
for im in self.images]
im = _image.from_images(self.bbox.height * mag,
self.bbox.width * mag,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
clippath, affine = self.get_transformed_clip_path_and_affine()
renderer.draw_image(l, b, im, self.bbox,
clippath, affine)
# render the axes
for a in self.axes: a.draw(renderer)
# render the figure text
for t in self.texts: t.draw(renderer)
for legend in self.legends:
legend.draw(renderer)
renderer.close_group('figure')
self._cachedRenderer = renderer
self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
draw :class:`matplotlib.artist.Artist` instance *a* only --
this is available only after the figure is drawn
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def get_axes(self):
return self.axes
def legend(self, handles, labels, *args, **kwargs):
"""
Place a legend in the figure. Labels are a sequence of
strings, handles is a sequence of
:class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances, and loc can be a
string or an integer specifying the legend location
USAGE::
legend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right')
The *loc* location codes are::
'best' : 0, (currently not supported for figure legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
*loc* can also be an (x,y) tuple in figure coords, which
specifies the lower left of the legend box. figure coords are
(0,0) is the left, bottom of the figure and 1,1 is the right,
top.
The legend instance is returned. The following kwargs are supported
*loc*
the location of the legend
*numpoints*
the number of points in the legend line
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*pad*
the fractional whitespace inside the legend border
*markerscale*
the relative size of legend markers vs. original
*shadow*
if True, draw a shadow behind legend
*labelsep*
the vertical space between the legend entries
*handlelen*
the length of the legend lines
*handletextsep*
the space between the legend line and legend text
*axespad*
the border between the axes and legend edge
.. plot:: mpl_examples/pylab_examples/figlegend_demo.py
"""
handles = flatten(handles)
l = Legend(self, handles, labels, *args, **kwargs)
self.legends.append(l)
return l
def text(self, x, y, s, *args, **kwargs):
"""
Call signature::
figtext(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(
x=x, y=y, text=s,
)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
return t
text.__doc__ = dedent(text.__doc__) % artist.kwdocd
def _set_artist_props(self, a):
if a!= self:
a.set_figure(self)
a.set_transform(self.transFigure)
def gca(self, **kwargs):
"""
Return the current axes, creating one if necessary
The following kwargs are supported
%(Axes)s
"""
ax = self._axstack()
if ax is not None:
ispolar = kwargs.get('polar', False)
projection = kwargs.get('projection', None)
if ispolar:
if projection is not None and projection != 'polar':
                    raise ValueError(
                        "polar=True, yet projection='%s'. "
                        "Only one of these arguments should be supplied." %
                        projection)
projection = 'polar'
projection_class = get_projection_class(projection)
if isinstance(ax, projection_class):
return ax
return self.add_subplot(111, **kwargs)
gca.__doc__ = dedent(gca.__doc__) % artist.kwdocd
def sca(self, a):
'Set the current axes to be a and return a'
self._axstack.bubble(a)
for func in self._axobservers: func(self)
return a
def add_axobserver(self, func):
'whenever the axes state change, func(self) will be called'
self._axobservers.append(func)
def savefig(self, *args, **kwargs):
"""
call signature::
savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False):
Save the current figure.
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python file-like object.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename.
Keyword arguments:
*dpi*: [ None | scalar > 0 ]
The resolution in dots per inch. If *None* it will default to
the value ``savefig.dpi`` in the matplotlibrc file.
*facecolor*, *edgecolor*:
the colors of the figure rectangle
*orientation*: [ 'landscape' | 'portrait' ]
not supported on all backends; currently only on postscript output
*papertype*:
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*transparent*:
If *True*, the figure patch and axes patches will all be
transparent. This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
"""
for key in ('dpi', 'facecolor', 'edgecolor'):
if key not in kwargs:
kwargs[key] = rcParams['savefig.%s'%key]
transparent = kwargs.pop('transparent', False)
if transparent:
original_figure_alpha = self.patch.get_alpha()
self.patch.set_alpha(0.0)
original_axes_alpha = []
for ax in self.axes:
patch = ax.patch
original_axes_alpha.append(patch.get_alpha())
patch.set_alpha(0.0)
self.canvas.print_figure(*args, **kwargs)
if transparent:
self.patch.set_alpha(original_figure_alpha)
for ax, alpha in zip(self.axes, original_axes_alpha):
ax.patch.set_alpha(alpha)
def colorbar(self, mappable, cax=None, ax=None, **kw):
if ax is None:
ax = self.gca()
if cax is None:
cax, kw = cbar.make_axes(ax, **kw)
cax.hold(True)
cb = cbar.Colorbar(cax, mappable, **kw)
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.set_colorbar(cb, cax)
self.sca(ax)
return cb
colorbar.__doc__ = '''
Create a colorbar for a ScalarMappable instance.
Documentation for the pylab thin wrapper:
%s
'''% cbar.colorbar_doc
def subplots_adjust(self, *args, **kwargs):
"""
        fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
Update the :class:`SubplotParams` with *kwargs* (defaulting to rc where
None) and update the subplot locations
"""
self.subplotpars.update(*args, **kwargs)
import matplotlib.axes
for ax in self.axes:
if not isinstance(ax, matplotlib.axes.SubplotBase):
# Check if sharing a subplots axis
if ax._sharex is not None and isinstance(ax._sharex, matplotlib.axes.SubplotBase):
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif ax._sharey is not None and isinstance(ax._sharey, matplotlib.axes.SubplotBase):
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ax.update_params()
ax.set_position(ax.figbox)
def ginput(self, n=1, timeout=30, show_clicks=True):
"""
call signature::
ginput(self, n=1, timeout=30, show_clicks=True)
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is zero or negative, does not timeout.
If *n* is zero or negative, accumulate clicks until a middle click
(or potentially both mouse buttons at once) terminates the input.
Right clicking cancels last input.
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
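
        Example (block until three points are clicked, then print the
        clicked coordinates)::

            points = fig.ginput(3)
            print points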
"""
blocking_mouse_input = BlockingMouseInput(self)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
def waitforbuttonpress(self, timeout=-1):
"""
call signature::
waitforbuttonpress(self, timeout=-1)
Blocking call to interact with the figure.
        This will return True if a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
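
        Example::

            # True for a key press, False for a mouse button, None on timeout
            pressed = fig.waitforbuttonpress(timeout=10)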
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
def figaspect(arg):
"""
Create a figure with specified aspect ratio. If *arg* is a number,
use that aspect ratio. If *arg* is an array, figaspect will
determine the width and height for a figure that would fit array
preserving aspect ratio. The figure width, height in inches are
    returned. Be sure to create an axes with equal width and height,
    e.g.
Example usage::
# make a figure twice as tall as it is wide
w, h = figaspect(2.)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
# make a figure with the proper aspect for an array
A = rand(5,3)
w, h = figaspect(A)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
Thanks to Fernando Perez for this function
"""
isarray = hasattr(arg, 'shape')
# min/max sizes to respect when autoscaling. If John likes the idea, they
# could become rc parameters, for now they're hardwired.
figsize_min = np.array((4.0,2.0)) # min length for width/height
figsize_max = np.array((16.0,16.0)) # max length for width/height
#figsize_min = rcParams['figure.figsize_min']
#figsize_max = rcParams['figure.figsize_max']
# Extract the aspect ratio of the array
if isarray:
nr,nc = arg.shape[:2]
arr_ratio = float(nr)/nc
else:
arr_ratio = float(arg)
# Height of user figure defaults
fig_height = rcParams['figure.figsize'][1]
# New size for the figure, keeping the aspect ratio of the caller
newsize = np.array((fig_height/arr_ratio,fig_height))
# Sanity checks, don't drop either dimension below figsize_min
newsize /= min(1.0,*(newsize/figsize_min))
# Avoid humongous windows as well
newsize /= max(1.0,*(newsize/figsize_max))
# Finally, if we have a really funky aspect ratio, break it but respect
# the min/max dimensions (we don't want figures 10 feet tall!)
newsize = np.clip(newsize,figsize_min,figsize_max)
return newsize
artist.kwdocd['Figure'] = artist.kwdoc(Figure)
| gpl-3.0 |
bashalex/datapot | examples/imdb_eval.py | 1 | 1225 | #!/usr/bin/env python
'''
Bag of Words Meets Bags of Popcorn
Usage example for unstructured textual bzip2-compressed data
datapot.fit method subsamples the data to detect language and choose corresponding stopwords and stemming.
For each review datapot.transform generates an SVD-compressed 12-dimensional tfidf-vector representation.
'''
from __future__ import print_function
import sys
import bz2
import time
import xgboost as xgb
from sklearn.model_selection import cross_val_score
import datapot as dp
from datapot.datasets import load_imdb
data = load_imdb()
datapot = dp.DataPot()
t0 = time.time()
datapot.detect(data)
print('detect time:', time.time()-t0)
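# Drop the transformer that detect() assigned to the target column so the
# sentiment label is left as-is rather than being re-encoded as a feature.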
datapot.remove_transformer('sentiment', 0)
t0 = time.time()
datapot.fit(data, verbose=True)
print('fit time:', time.time()-t0)
t0 = time.time()
df = datapot.transform(data)
print('transform time:', time.time()-t0)
X = df.drop(['sentiment'], axis=1)
y = df['sentiment']
model = xgb.XGBClassifier()
cv_score = cross_val_score(model, X, y, cv=5)
assert all(i > 0.5 for i in cv_score), 'Low score!'
print('Cross-val score:', cv_score)
model.fit(X, y)
fi = model.feature_importances_
print('Feature importance:')
print(*(list(zip(X.columns, fi))), sep='\n')
| gpl-3.0 |
mjgrav2001/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
srndic/mimicus | reproduction/fig9.py | 1 | 6873 | #!/usr/bin/env python
'''
Copyright 2014 Nedim Srndic, University of Tuebingen
This file is part of Mimicus.
Mimicus is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mimicus is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mimicus. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
fig9.py
Reproduction of Figure 9 of the paper "Practical Evasion of a
Learning-Based Classifier: A Case Study" by Nedim Srndic and
Pavel Laskov.
Created on March 21, 2014.
'''
from argparse import ArgumentParser
import multiprocessing
import os
import random
import sys
from matplotlib import pyplot
from mimicus.tools.featureedit import FeatureDescriptor, FeatureEdit, \
FileDefined
from mimicus.classifiers.RandomForest import RandomForest
from mimicus.tools.datasets import csv2numpy
from sklearn.metrics import accuracy_score
import common
import config
def mimicry(wolf_fname, sheep_feats, m_id):
'''
Mimics file with the features sheep_feats using the attack file
with the name wolf_fname. Returns the resulting feature vector.
'''
mimic = FeatureEdit(wolf_fname).modify_file(sheep_feats, '/run/shm')
os.remove(mimic['path'])
return mimic['feats'], m_id
def mimicry_wrap(args):
'''
Helper function for calling the mimicry function in parallel.
'''
return mimicry(*args)
def fig9(tr_vec, tr_labels, te_vec, te_labels, fnames):
'''
Reproduction of results published in Table 10 of "Malicious PDF Detection
Using Metadata and Structural Features" by Charles Smutz and
Angelos Stavrou, ACSAC 2012.
'''
print 'Loading random forest classifier...'
rf = RandomForest()
rf.load_model(config.get('experiments', 'FTC_model'))
ben_means, ben_devs = common.get_benign_mean_stddev(tr_vec, tr_labels)
res = []
# te_vec will be randomly modified in feature space.
# f_vec will be randomly modified in feature space but the
# randomly generated variables will be adjusted to be
# valid for the given feature
f_vec = te_vec.copy()
print 'Got {} samples. Modifying them for attack...'.format(len(te_vec))
print '{:>25s} {:>15s} {:>15s}'.format('Feature name', 'Feature space',
'Problem space')
pool = multiprocessing.Pool(processes=None)
# Modify top features one by one
for f_name in common.top_feats:
f_i = FeatureDescriptor.get_feature_names().index(f_name)
f_desc = FeatureDescriptor.get_feature_description(f_name)
print '{:>25s}'.format(f_name),
# For all files
for i in range(len(te_vec)):
if te_labels[i] != 1:
# Modify only malicious files
continue
first_val = True
while True:
# Keep randomly generating a new value
# Stop when it becomes valid for the current feature
new_val = random.gauss(ben_means[f_i], ben_devs[f_i])
if first_val:
# Make sure we generate random values for te_vec
te_vec[i][f_i] = new_val
first_val = False
# If not valid, retry
if f_desc['type'] == bool:
new_val = False if new_val < 0.5 else True
elif f_desc['type'] == int:
new_val = int(round(new_val))
if f_desc['range'][0] == FileDefined and new_val < 0:
continue
elif (f_desc['range'][0] != FileDefined and
new_val < f_desc['range'][0]):
continue
if f_desc['type'] != bool and f_desc['range'][1] < new_val:
continue
# Valid, win!
f_vec[i][f_i] = new_val
break
# mod_data has feature values read from the problem space,
# i.e., by converting feature vectors to files and back
mod_data = f_vec.copy()
pargs = [(fnames[i], f_vec[i], i)
for i, l in enumerate(te_labels) if l == 1]
for mimic, m_id in pool.imap(mimicry_wrap, pargs):
mod_data[m_id] = mimic
pred = rf.predict(te_vec)
fspace = accuracy_score(te_labels, pred)
print '{:>15.3f}'.format(fspace),
pred = rf.predict(mod_data)
pspace = accuracy_score(te_labels, pred)
print '{:>15.3f}'.format(pspace)
res.append((fspace, pspace))
return res
def main():
random.seed(0)
parser = ArgumentParser()
parser.add_argument('--plot', help='Where to save plot (file name)',
default=False)
parser.add_argument('--show', help='Show plot in a window', default=False,
action='store_true')
args = parser.parse_args()
print 'Loading training data from CSV...'
tr_data, tr_labels, _ = csv2numpy(config.get('datasets', 'contagio'))
print 'Loading test data from CSV...'
te_data, te_labels, te_fnames = csv2numpy(config.get('datasets',
'contagio_test'))
print 'Evaluating...'
scores = fig9(tr_data, tr_labels, te_data, te_labels, te_fnames)
if not (args.plot or args.show):
return 0
# Plot
feat_points, file_points = zip(*scores)
fig = pyplot.figure()
pyplot.plot(feat_points, label='Feature space',
marker='o', color='k', linewidth=2)
pyplot.plot(file_points, label='Problem space',
marker='^', color='k', linewidth=2, linestyle='--')
axes = fig.gca()
# Set up axes and labels
axes.yaxis.set_ticks([r / 10.0 for r in range(11)])
axes.yaxis.grid()
axes.set_ylim(0, 1)
axes.set_ylabel('Accuracy')
xticklabels = [common.top_feats[0]] + ['(+) ' + name
for name in common.top_feats[1:]]
axes.set_xticklabels(xticklabels, rotation=60, ha='right')
fig.subplots_adjust(bottom=0.34, top=0.95, left=0.11, right=0.98)
pyplot.legend(loc='lower left')
if args.show:
pyplot.show()
if args.plot:
pyplot.savefig(args.plot, dpi=300, bbox_inches='tight')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
robin-lai/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline.
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |