repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
BeelineShopping/BeelineMatcher | crf/lib/training/cli.py | 1 | 4982 | import re
import decimal
import optparse
import pandas as pd
from training import utils
class Cli(object):
def __init__(self, argv):
self.opts = self._parse_args(argv)
self._upstream_cursor = None
def run(self):
self.generate_data(self.opts.count, self.opts.offset)
def generate_data(self, count, offset):
"""
Generates training data in the CRF++ format for the ingredient
tagging task
"""
df = pd.read_csv(self.opts.data_path)
df = df.fillna("")
start = int(offset)
end = int(offset) + int(count)
df_slice = df.iloc[start: end]
for index, row in df_slice.iterrows():
try:
# extract the display name
display_input = utils.cleanUnicodeFractions(row["input"])
tokens = utils.tokenize(display_input)
del(row["input"])
rowData = self.addPrefixes([(t, self.matchUp(t, row)) for t in tokens])
for i, (token, tags) in enumerate(rowData):
features = utils.getFeatures(token, i+1, tokens)
print utils.joinLine([token] + features + [self.bestTag(tags)])
# ToDo: deal with this
except UnicodeDecodeError:
pass
print
def parseNumbers(self, s):
"""
Parses a string that represents a number into a decimal data type so that
we can match the quantity field in the db with the quantity that appears
in the display name. Rounds the result to 2 places.
"""
ss = utils.unclump(s)
m3 = re.match(r'^\d+$', ss)
if m3 is not None:
return decimal.Decimal(round(float(ss), 2))
m1 = re.match(r'(\d+)\s+(\d)/(\d)', ss)
if m1 is not None:
num = int(m1.group(1)) + (float(m1.group(2)) / float(m1.group(3)))
return decimal.Decimal(str(round(num,2)))
m2 = re.match(r'^(\d)/(\d)$', ss)
if m2 is not None:
num = float(m2.group(1)) / float(m2.group(2))
return decimal.Decimal(str(round(num,2)))
return None
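# Illustrative behaviour of parseNumbers (not from the original source; this
# assumes utils.unclump leaves plain ASCII strings unchanged):
#   parseNumbers("2")      -> Decimal('2')
#   parseNumbers("1 1/2")  -> Decimal('1.5')
#   parseNumbers("3/4")    -> Decimal('0.75')
#   parseNumbers("a pinch") -> None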
def matchUp(self, token, ingredientRow):
"""
Returns our best guess of the match between the tags and the
words from the display text.
This problem is difficult for the following reasons:
* not all the words in the display name have associated tags
* the quantity field is stored as a number, but it appears
as a string in the display name
* the comment is often a compilation of different comments in
the display name
"""
ret = []
# strip parens from the token, since they often appear in the
# display_name, but are removed from the comment.
token = utils.normalizeToken(token)
decimalToken = self.parseNumbers(token)
for key, val in ingredientRow.iteritems():
if isinstance(val, basestring):
for n, vt in enumerate(utils.tokenize(val)):
if utils.normalizeToken(vt) == token:
ret.append(key.upper())
elif decimalToken is not None:
try:
if val == decimalToken:
ret.append(key.upper())
except:
pass
return ret
def addPrefixes(self, data):
"""
We use BIO tagging/chunking to differentiate between tags
at the start of a tag sequence and those in the middle. This
is a common technique in entity recognition.
Reference: http://www.kdd.cis.ksu.edu/Courses/Spring-2013/CIS798/Handouts/04-ramshaw95text.pdf
"""
prevTags = None
newData = []
for n, (token, tags) in enumerate(data):
newTags = []
for t in tags:
p = "B" if ((prevTags is None) or (t not in prevTags)) else "I"
newTags.append("%s-%s" % (p, t))
newData.append((token, newTags))
prevTags = tags
return newData
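# Illustrative example (not from the original source): given token/tag pairs
#   [("fresh", ["COMMENT"]), ("basil", ["NAME"]), ("leaves", ["NAME"])]
# addPrefixes returns
#   [("fresh", ["B-COMMENT"]), ("basil", ["B-NAME"]), ("leaves", ["I-NAME"])]
# because "leaves" continues the NAME tag begun by "basil".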
def bestTag(self, tags):
if len(tags) == 1:
return tags[0]
# if there are multiple tags, pick the first which isn't COMMENT
else:
for t in tags:
if (t != "B-COMMENT") and (t != "I-COMMENT"):
return t
# we have no idea what to guess
return "OTHER"
def _parse_args(self, argv):
"""
Parse the command-line arguments into a dict.
"""
opts = optparse.OptionParser()
opts.add_option("--count", default="100", help="(%default)")
opts.add_option("--offset", default="0", help="(%default)")
opts.add_option("--data-path", default="nyt-ingredients-snapshot-2015.csv", help="(%default)")
(options, args) = opts.parse_args(argv)
return options
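# Illustrative invocation (assumed; the entry point that instantiates Cli and
# calls run() is not shown in this file):
#   python -m training.cli --count=1000 --offset=0 \
#       --data-path=nyt-ingredients-snapshot-2015.csv > train.crf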
| mit |
Reemh/taco_server | src/graph.py | 1 | 3232 | __author__ = 'Reem'
import diff_cache
import json
import os
import numpy as np
from sklearn import manifold
data_directory = 'plugins/taco_server/MDS_data/'
# this cache should cache the result with positions from MDS
# todo either use the following functions or delete them
def get_mds_cache(name):
file_name = data_directory + name + '.json'
if os.path.isfile(file_name):
with open(file_name) as data_file:
data = json.load(data_file)
return data
#if the file doesn't exist
return None
def set_mds_cache(name, data):
file_name = data_directory + name + '.json'
with open(file_name, 'w') as outfile:
json.dump(data, outfile)
#@param ids: should be a list of the table's ids
def calc_fd_graph(ids, direction, ops):
links = []
if len(ids) > 0:
# all elements except the last one
for i, id1 in enumerate(ids[:-1]):
# all elements except the i and all before
# +1 to make sure that they are not identical
for j, id2 in enumerate(ids[i+1:]):
r = diff_cache.get_ratios(id1, id2, direction, ops, bins=1, jsonit=False)
links += [{"source": ids.index(id1), "target": ids.index(id2), "value": 100 - float(r.no_ratio * 100)}]
# todo cache this in the MDS data
return links
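# Illustrative note (not from the original source): for ids = [3, 7, 9] the
# nested loops above emit one link per unordered pair, i.e. (3, 7), (3, 9)
# and (7, 9), with source/target given as indices into the ids list.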
def calc_mds_graph(ids, direction, ops):
# this is diff not similarities :|!
distances = []
for i, id1 in enumerate(ids):
sim_row = [0] * len(ids)
for j, id2 in enumerate(ids):
if j >= i:
break # because we already have this half or will fill it later
if id1 != id2:
# the direction here might always have to be 2 or we make it more flexible
r = diff_cache.get_ratios(id1, id2, direction, ops, bins=1, jsonit=False)
# todo to consider only the selected operations
# sim_row += [r.a_ratio + r.d_ratio + r.c_ratio]
val = 1 - r.no_ratio
# j column
sim_row[j] = val
# j row and i column
distances[j][i] = val
distances.append(sim_row)
# http://baoilleach.blogspot.co.at/2014/01/convert-distance-matrix-to-2d.html
# it doesn't really change the result :|
# adist = np.array(similarities)
# amax = np.amax(adist)
# adist /= amax
mds = manifold.MDS(n_components=2, max_iter=3000, random_state=6, eps=1e-9,
dissimilarity="precomputed", n_jobs=1)
res = mds.fit(distances)
# res = mds.fit(adist)
pos = res.embedding_
return pos_to_json(pos)
# we are not using this function as we get the name from the client anyway
def graph_nodes(ids):
# dataset.get(ids).name
# todo get the name from the dataset caleydo api
nodes = [{"name": str(i)} for i in ids]
return nodes
# convert the ndarray to a parsable json thing :|
def pos_to_json(pos):
json_pos = []
for i, p in enumerate(pos):
json_pos += [{'x': p[0], 'y': p[1]}]
return {'pos': json_pos,
'xmin': pos[:, 0].min(),
'xmax': pos[:, 0].max(),
'ymin': pos[:, 1].min(),
'ymax': pos[:, 1].max()}
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/path.py | 10 | 36297 | """
A module for dealing with the polylines used throughout matplotlib.
The primary class for polyline handling in matplotlib is :class:`Path`.
Almost all vector drawing makes use of Paths somewhere in the drawing
pipeline.
Whilst a :class:`Path` instance itself cannot be drawn, there exists
:class:`~matplotlib.artist.Artist` subclasses which can be used for
convenient Path visualisation - the two most frequently used of these are
:class:`~matplotlib.patches.PathPatch` and
:class:`~matplotlib.collections.PathCollection`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib import _path
from matplotlib.cbook import simple_linear_interpolation, maxdict
from matplotlib import rcParams
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
or :meth:`cleaned` to get the vertex/code pairs. This is important,
since many :class:`Path` objects, as an optimization, do not store a
*codes* array at all, but have a default one provided for them by
:meth:`iter_segments`.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 79 # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
code_type = np.uint8
def __init__(self, vertices, codes=None, _interpolation_steps=1, closed=False,
readonly=False):
"""
Create a new path with the given vertices and codes.
Parameters
----------
vertices : array_like
The ``(n, 2)`` float array, masked array or sequence of pairs
representing the vertices of the path.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
codes : {None, array_like}, optional
n-length array of integers representing the codes of the path.
If not None, codes must be the same length as vertices.
If None, *vertices* will be treated as a series of line segments.
_interpolation_steps : int, optional
Used as a hint to certain projections, such as Polar, that this
path should be linearly interpolated immediately before drawing.
This attribute is primarily an implementation detail and is not
intended for public use.
closed : bool, optional
If *codes* is None and closed is True, vertices will be treated as
line segments of a closed polygon.
readonly : bool, optional
Makes the path behave in an immutable way and sets the vertices
and codes as read-only arrays.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if codes is not None:
codes = np.asarray(codes, self.code_type)
assert codes.ndim == 1
assert len(codes) == len(vertices)
if len(codes):
assert codes[0] == self.MOVETO
elif closed:
codes = np.empty(len(vertices), dtype=self.code_type)
codes[0] = self.MOVETO
codes[1:-1] = self.LINETO
codes[-1] = self.CLOSEPOLY
assert vertices.ndim == 2
assert vertices.shape[1] == 2
self._vertices = vertices
self._codes = codes
self._interpolation_steps = _interpolation_steps
self._update_values()
if readonly:
self._vertices.flags.writeable = False
if self._codes is not None:
self._codes.flags.writeable = False
self._readonly = True
else:
self._readonly = False
@classmethod
def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
"""
Creates a Path instance without the expense of calling the constructor
Parameters
----------
verts : numpy array
codes : numpy array (may not be None)
internals : dict or None
The attributes that the resulting path should have.
Allowed keys are ``readonly``, ``should_simplify``,
``simplify_threshold``, ``has_nonfinite`` and
``interpolation_steps``.
"""
internals = internals or {}
pth = cls.__new__(cls)
pth._vertices = verts
pth._codes = codes
pth._readonly = internals.pop('readonly', False)
pth.should_simplify = internals.pop('should_simplify', True)
pth.simplify_threshold = internals.pop('simplify_threshold',
rcParams['path.simplify_threshold'])
pth._has_nonfinite = internals.pop('has_nonfinite', False)
pth._interpolation_steps = internals.pop('interpolation_steps', 1)
if internals:
raise ValueError('Unexpected internals provided to '
'_fast_from_codes_and_verts: '
'{0}'.format('\n *'.join(six.iterkeys(internals))))
return pth
def _update_values(self):
self._should_simplify = (
rcParams['path.simplify'] and
(len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO))))
self._simplify_threshold = rcParams['path.simplify_threshold']
self._has_nonfinite = not np.isfinite(self._vertices).all()
@property
def vertices(self):
"""
The list of vertices in the `Path` as an Nx2 numpy array.
"""
return self._vertices
@vertices.setter
def vertices(self, vertices):
if self._readonly:
raise AttributeError("Can't set vertices on a readonly Path")
self._vertices = vertices
self._update_values()
@property
def codes(self):
"""
The list of codes in the `Path` as a 1-D numpy array. Each
code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
or `CLOSEPOLY`. For codes that correspond to more than one
vertex (`CURVE3` and `CURVE4`), that code will be repeated so
that the length of `self.vertices` and `self.codes` is always
the same.
"""
return self._codes
@codes.setter
def codes(self, codes):
if self._readonly:
raise AttributeError("Can't set codes on a readonly Path")
self._codes = codes
self._update_values()
@property
def simplify_threshold(self):
"""
The fraction of a pixel difference below which vertices will
be simplified out.
"""
return self._simplify_threshold
@simplify_threshold.setter
def simplify_threshold(self, threshold):
self._simplify_threshold = threshold
@property
def has_nonfinite(self):
"""
`True` if the vertices array has nonfinite values.
"""
return self._has_nonfinite
@property
def should_simplify(self):
"""
`True` if the vertices array should be simplified.
"""
return self._should_simplify
@should_simplify.setter
def should_simplify(self, should_simplify):
self._should_simplify = should_simplify
@property
def readonly(self):
"""
`True` if the `Path` is read-only.
"""
return self._readonly
def __copy__(self):
"""
Returns a shallow copy of the `Path`, which will share the
vertices and codes with the source `Path`.
"""
import copy
return copy.copy(self)
copy = __copy__
def __deepcopy__(self, memo=None):
"""
Returns a deepcopy of the `Path`. The `Path` will not be
readonly, even if the source `Path` is.
"""
return self.__class__(
self.vertices.copy(),
self.codes.copy() if self.codes is not None else None,
_interpolation_steps=self._interpolation_steps)
deepcopy = __deepcopy__
@classmethod
def make_compound_path_from_polys(cls, XY):
"""
Make a compound path object to draw a number of polygons with equal
numbers of sides. XY is a (numpolys x numsides x 2) numpy array of
vertices. The return object is a :class:`Path`.
.. plot:: mpl_examples/api/histogram_path_demo.py
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
assert(two==2)
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * cls.LINETO
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:,i]
return cls(verts, codes)
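# Illustrative note (not from the original source): for XY of shape
# (2, 3, 2), i.e. two triangles, this produces 8 vertices whose codes are
# [MOVETO, LINETO, LINETO, CLOSEPOLY] repeated once per polygon.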
@classmethod
def make_compound_path(cls, *args):
"""Make a compound path from a list of Path objects."""
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = np.empty(total_length, dtype=cls.code_type)
i = 0
for path in args:
if path.codes is None:
codes[i] = cls.MOVETO
codes[i + 1:i + len(path.vertices)] = cls.LINETO
else:
codes[i:i + len(path.codes)] = path.codes
i += len(path.vertices)
return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, transform=None, remove_nans=True, clip=None,
snap=False, stroke_width=1.0, simplify=None,
curves=True, sketch=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
Additionally, this method can provide a number of standard
cleanups and conversions to the path.
Parameters
----------
transform : None or :class:`~matplotlib.transforms.Transform` instance
If not None, the given affine transformation will
be applied to the path.
remove_nans : {False, True}, optional
If True, will remove all NaNs from the path and
insert MOVETO commands to skip over them.
clip : None or sequence, optional
If not None, must be a four-tuple (x1, y1, x2, y2)
defining a rectangle in which to clip the path.
snap : None or bool, optional
If None, auto-snap to pixels, to reduce
fuzziness of rectilinear lines. If True, force snapping, and
if False, don't snap.
stroke_width : float, optional
The width of the stroke being drawn. Needed
as a hint for the snapping algorithm.
simplify : None or bool, optional
If True, perform simplification, to remove
vertices that do not affect the appearance of the path. If
False, perform no simplification. If None, use the
should_simplify member variable.
curves : {True, False}, optional
If True, curve segments will be returned as curve
segments. If False, all curves will be converted to line
segments.
sketch : None or sequence, optional
If not None, must be a 3-tuple of the form
(scale, length, randomness), representing the sketch
parameters.
"""
if not len(self):
return
cleaned = self.cleaned(transform=transform,
remove_nans=remove_nans, clip=clip,
snap=snap, stroke_width=stroke_width,
simplify=simplify, curves=curves,
sketch=sketch)
vertices = cleaned.vertices
codes = cleaned.codes
len_vertices = vertices.shape[0]
# Cache these object lookups for performance in the loop.
NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
STOP = self.STOP
i = 0
while i < len_vertices:
code = codes[i]
if code == STOP:
return
else:
num_vertices = NUM_VERTICES_FOR_CODE[code]
curr_vertices = vertices[i:i+num_vertices].flatten()
yield curr_vertices, code
i += num_vertices
def cleaned(self, transform=None, remove_nans=False, clip=None,
quantize=False, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Cleans up the path according to the parameters returning a new
Path instance.
.. seealso::
See :meth:`iter_segments` for details of the keyword arguments.
Returns
-------
Path instance with cleaned up vertices and codes.
"""
vertices, codes = _path.cleanup_path(self, transform,
remove_nans, clip,
snap, stroke_width,
simplify, curves, sketch)
internals = {'should_simplify': self.should_simplify and not simplify,
'has_nonfinite': self.has_nonfinite and not remove_nans,
'simplify_threshold': self.simplify_threshold,
'interpolation_steps': self._interpolation_steps}
return Path._fast_from_codes_and_verts(vertices, codes, internals)
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
def contains_point(self, point, transform=None, radius=0.0):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
*radius* allows the path to be made slightly larger or
smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.point_in_path(point[0], point[1], radius, self, transform)
return result
def contains_points(self, points, transform=None, radius=0.0):
"""
Returns a bool array which is *True* if the path contains the
corresponding point.
If *transform* is not *None*, the path will be transformed
before performing the test.
*radius* allows the path to be made slightly larger or
smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from .transforms import Bbox
path = self
if transform is not None:
transform = transform.frozen()
if not transform.is_affine:
path = self.transformed(transform)
transform = None
return Bbox(_path.get_path_extents(path, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from .transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
if steps == 1:
return self
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0):
"""
Convert this path to a list of polygons. Each polygon is an
Nx2 array of vertices. In other words, each polygon has no
``MOVETO`` instructions or curves. This is useful for
displaying in backends that do not support compound paths or
Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
if transform is None:
return [self.vertices]
else:
return [transform.transform(self.vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return _path.convert_path_to_polygons(self, transform, width, height)
_unit_rectangle = None
@classmethod
def unit_rectangle(cls):
"""
Return a :class:`Path` instance of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
[cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO, cls.CLOSEPOLY],
readonly=True)
return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()
@classmethod
def unit_regular_polygon(cls, numVertices):
"""
Return a :class:`Path` instance for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
codes = np.empty((numVertices + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_polygons[numVertices] = path
return path
_unit_regular_stars = WeakValueDictionary()
@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
Return a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
codes = np.empty((ns2 + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
Return a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
_unit_circle = None
@classmethod
def unit_circle(cls):
"""
Return the readonly :class:`Path` of the unit circle.
For most cases, :func:`Path.circle` will be what you want.
"""
if cls._unit_circle is None:
cls._unit_circle = cls.circle(center=(0, 0), radius=1,
readonly=True)
return cls._unit_circle
@classmethod
def circle(cls, center=(0., 0.), radius=1., readonly=False):
"""
Return a Path representing a circle of a given radius and center.
Parameters
----------
center : pair of floats
The center of the circle. Default ``(0, 0)``.
radius : float
The radius of the circle. Default is 1.
readonly : bool
Whether the created path should have the "readonly" argument
set when creating the Path instance.
Notes
-----
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array([[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
dtype=np.float_)
codes = [cls.CURVE4] * 26
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
return Path(vertices * radius + center, codes, readonly=readonly)
_unit_circle_righthalf = None
@classmethod
def unit_circle_righthalf(cls):
"""
Return a :class:`Path` of the right half
of a unit circle. The circle is approximated using cubic Bezier
curves. This uses 4 splines around the circle using the approach
presented here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle_righthalf is None:
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(14)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
return cls._unit_circle_righthalf
@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
Return an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
Masionobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [cls.MOVETO, cls.LINETO]
codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.empty((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = cls.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return cls(vertices, codes, readonly=True)
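# Illustrative example (not part of the original source): Path.arc(0, 90)
# returns a quarter circle built from n = 2 cubic Bezier segments, i.e.
# 3*2 + 1 = 7 vertices whose codes start with MOVETO followed by CURVE4.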
@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
Return a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
_hatch_dict = maxdict(8)
@classmethod
def hatch(cls, hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a Path that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
if hatchpattern is None:
return None
hatch_path = cls._hatch_dict.get((hatchpattern, density))
if hatch_path is not None:
return hatch_path
hatch_path = get_path(hatchpattern, density)
cls._hatch_dict[(hatchpattern, density)] = hatch_path
return hatch_path
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
# Use make_compound_path_from_polys
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
def get_path_collection_extents(
master_transform, paths, transforms, offsets, offset_transform):
"""
Given a sequence of :class:`Path` objects,
:class:`~matplotlib.transforms.Transform` objects and offsets, as
found in a :class:`~matplotlib.collections.PathCollection`,
returns the bounding box that encapsulates all of them.
*master_transform* is a global transformation to apply to all paths
*paths* is a sequence of :class:`Path` instances.
*transforms* is a sequence of
:class:`~matplotlib.transforms.Affine2D` instances.
*offsets* is a sequence of (x, y) offsets (or an Nx2 array)
*offset_transform* is a :class:`~matplotlib.transforms.Affine2D`
to apply to the offsets before applying the offset to the path.
The way that *paths*, *transforms* and *offsets* are combined
follows the same method as for collections. Each is iterated over
independently, so if you have 3 paths, 2 transforms and 1 offset,
their combinations are as follows:
(A, A, A), (B, B, A), (C, A, A)
"""
from .transforms import Bbox
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
master_transform, paths, transforms, offsets, offset_transform))
def get_paths_extents(paths, transforms=[]):
"""
Given a sequence of :class:`Path` objects and optional
:class:`~matplotlib.transforms.Transform` objects, returns the
bounding box that encapsulates all of them.
*paths* is a sequence of :class:`Path` instances.
*transforms* is an optional sequence of
:class:`~matplotlib.transforms.Affine2D` instances to apply to
each path.
"""
from .transforms import Bbox, Affine2D
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
Affine2D(), paths, transforms, [], Affine2D()))
def _define_deprecated_functions(ns):
from .cbook import deprecated
# The C++ functions are not meant to be used directly.
# Users should use the more pythonic wrappers in the Path
# class instead.
for func, alternative in [
('point_in_path', 'path.Path.contains_point'),
('get_path_extents', 'path.Path.get_extents'),
('point_in_path_collection', 'collection.Collection.contains'),
('path_in_path', 'path.Path.contains_path'),
('path_intersects_path', 'path.Path.intersects_path'),
('convert_path_to_polygons', 'path.Path.to_polygons'),
('cleanup_path', 'path.Path.cleaned'),
('points_in_path', 'path.Path.contains_points'),
('clip_path_to_rect', 'path.Path.clip_to_bbox')]:
ns[func] = deprecated(
since='1.3', alternative=alternative)(getattr(_path, func))
_define_deprecated_functions(locals())
| mit |
parejkoj/dust | errors.py | 2 | 1366 |
import numpy as np
def prop_add( xerr=0.0, yerr=0.0 ):
return np.sqrt( xerr**2 + yerr**2 )
def prop_div( x, y, xerr=0.0, yerr=0.0 ):
F = x / y
return np.sqrt( xerr**2 + F**2 * yerr**2 ) / y
def prop_mult( x, y, xerr=0.0, yerr=0.0 ):
F = x * y
return np.sqrt( (xerr/x)**2 + (yerr/y)**2 ) * F
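# The three helpers above implement standard first-order propagation of
# uncorrelated errors:
#   F = x + y : dF = sqrt(xerr**2 + yerr**2)
#   F = x / y : dF = sqrt(xerr**2 + F**2 * yerr**2) / y
#   F = x * y : dF = F * sqrt((xerr/x)**2 + (yerr/y)**2)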
"""
## Quick test
import matplotlib.pyplot as plt
i = np.arange(10.0) + 1
x = 2.0 * i
y = 4.0 * i
xerr = np.zeros( 10.0 ) + 5.0
yerr = np.zeros( 10.0 ) + 2.5
test_add = prop_add( xerr, yerr )
test_div = prop_div( x, y, xerr, yerr )
test_mult = prop_mult( x, y, xerr, yerr )
fig = plt.figure()
plt.errorbar( i, x, yerr=xerr, ls='', color='r', lw=3, alpha=0.3 )
plt.errorbar( i, y, yerr=yerr, ls='', color='b', lw=3, alpha=0.3 )
plt.errorbar( i, x + y, yerr=test_add, ls='', color='0.3', lw=3, alpha=0.3 )
plt.errorbar( i, x / y, yerr=test_div, ls='', color='0.5', lw=3, alpha=0.3 )
plt.errorbar( i, x * y, yerr=test_mult, ls='', color='0.7', lw=3, alpha=0.3 )
plt.ylim(-1,1)
fig.show()
print 'Add, no yerr:', prop_add( xerr )
print 'Add, no xerr:', prop_add( yerr=yerr )
print 'Div, no yerr:', prop_div( x, y, xerr )
print 'Should be:', xerr / y
print 'Div, no xerr:', prop_div( x, y, yerr=yerr )
print 'Mult, no yerr:', prop_mult( x, y, xerr )
print 'Should be:', y * xerr
print 'Mult, no xerr:', prop_mult( x, y, yerr=yerr )
print 'Should be:', x * yerr
""" | bsd-2-clause |
ryanraaum/african-mtdna | popdata_sources/cerny2011/process.py | 1 | 1567 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import numpy as np
import sys
import csv
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0,'SeqRange'])
counts = []
sites = []
popnames = None
with open('cerny2011.csv', 'rU') as f:
reader = csv.reader(f)
header = reader.next()
popnames = header[2:]
for row in reader:
sites.append(str2sites(row[0], add16k=True))
counts.append(row[2:])
def convert(x):
if x == '':
return 0
return int(x)
countm = np.zeros((len(counts), len(popnames)), dtype=np.int)
for i in range(len(counts)):
countm[i] = [convert(x) for x in counts[i]]
## Validate
passed_validation = True
for i in range(len(sites)):
seq = sites2seq(sites[i], region)
mysites = seq2sites(seq)
if not mysites == sites[i]:
myseq = translate(sites2seq(mysites, region), None, '-')
if not seq == myseq:
passed_validation = False
print i
counter = {}
for name in popnames:
counter[name] = 0
if passed_validation:
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = "H%d" % (i + 1,)
seq = sites2seq(sites[i], region)
mysites = ' '.join([str(x) for x in seq2sites(seq)])
for j in range(len(popnames)):
prefix = metadata.ix[j,'NewPrefix']
for k in range(countm[i,j]):
counter[popnames[j]] += 1
num = str(counter[popnames[j]]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 |
rvraghav93/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in terms of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
print('skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
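# _predict_proba reproduces multinomial class probabilities as
# softmax(X @ coef_.T + intercept_); fit_single only calls it as a fallback
# when the lightning estimator raises NotImplementedError for predict_proba
# (n_classes > 2).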
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
y_n[y <= 16] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
| bsd-3-clause |
tacitia/ThoughtFlow | project/core/views/TopicModeler.py | 1 | 12602 | import gensim
from gensim.similarities.docsim import MatrixSimilarity
import math
import numpy as np
import pandas as pd
import textmining
import lda
import lda.datasets
from nltk.corpus import stopwords
from timeit import default_timer as timer
from collections import defaultdict
from pattern.en import singularize
from pprint import pprint
def compute_tdm(docs):
# Create some very short sample documents
# doc1 = 'The prefrontal cortex (PFC) subserves cognitive control: the ability to coordinate thoughts or actions in relation with internal goals. Its functional architecture, however, remains poorly understood. Using brain imaging in humans, we showed that the lateral PFC is organized as a cascade of executive processes from premotor to anterior PFC regions that control behavior according to stimuli, the present perceptual context, and the temporal episode in which stimuli occur, respectively. The results support an unified modular model of cognitive control that describes the overall functional organization of the human lateral PFC and has basic methodological and theoretical implications.'
# doc2 = 'The prefrontal cortex (PFC) is central to flexible and organized action. Recent theoretical and empirical results suggest that the rostro-caudal axis of the frontal lobes may reflect a hierarchical organization of control. Here, we test whether the rostro-caudal axis of the PFC is organized hierarchically, based on the level of abstraction at which multiple representations compete to guide selection of action. Four functional magnetic resonance imaging (fMRI) experiments parametrically manipulated the set of task-relevant (a) responses, (b) features, (c) dimensions, and (d) overlapping cue-to-dimension mappings. A systematic posterior to anterior gradient was evident within the PFC depending on the manipulated level of representation. Furthermore, across four fMRI experiments, activation in PFC subregions was consistent with the sub- and superordinate relationships that define an abstract representational hierarchy. In addition to providing further support for a representational hierarchy account of the rostro-caudal gradient in the PFC, these data provide important empirical constraints on current theorizing about control hierarchies and the PFC.'
# doc3 = 'Control regions in the brain are thought to provide signals that configure the brains moment-to-moment information processing. Previously, we identified regions that carried signals related to task-control initiation, maintenance, and adjustment. Here we characterize the interactions of these regions by applying graph theory to resting state functional connectivity MRI data. In contrast to previous, more unitary models of control, this approach suggests the presence of two distinct task-control networks. A frontoparietal network included the dorsolateral prefrontal cortex and intraparietal sulcus. This network emphasized start-cue and error-related activity and may initiate and adapt control on a trial-by-trial basis. The second network included dorsal anterior cingulate/medial superior frontal cortex, anterior insula/frontal operculum, and anterior prefrontal cortex. Among other signals, these regions showed activity sustained across the entire task epoch, suggesting that this network may control goal-directed behavior through the stable maintenance of task sets. These two independent networks appear to operate on different time scales and affect downstream processing via dissociable mechanisms.'
# doc4 = 'Neuromodulators such as dopamine have a central role in cognitive disorders. In the past decade, biological findings on dopamine function have been infused with concepts taken from computational theories of reinforcement learning. These more abstract approaches have now been applied to describe the biological algorithms at play in our brains when we form value judgements and make choices. The application of such quantitative models has opened up new fields, ripe for attack by young synthesizers and theoreticians.'
# Initialize class to create term-document matrix
tdm = textmining.TermDocumentMatrix()
print '>> filtering stopwords...'
englishStopWords = get_stopwords('english')
for d in docs:
words = d.split(' ')
filtered_words = filter(lambda x: x.lower() not in englishStopWords, words)
tdm.add_doc(' '.join(filtered_words))
print '>> computing tdm...'
raw_matrix = list(tdm.rows(cutoff=2))
return raw_matrix
# filtered_matrix = filter_stopwords(raw_matrix)
# return filtered_matrix
# return apply_tfidt_transform(raw_matrix)
def get_stopwords(language, name=None):
result = stopwords.words(language)
result.extend(['new', 'using', 'used', 'finding', 'findings'])
if (name == 'TVCG'):
result.extend(['datum', 'present', 'use', 'show', 'two', 'paper', 'different', 'visual', 'visualization', 'also', 'since', 'acquired', 'thus', 'lack', 'due', 'studied', 'useful', 'possible', 'additional', 'particular', 'describe', 'without', 'reported', 'among', 'always', 'various', 'prove', 'usable', 'yet', 'ask', 'within', 'even', 'best', 'run', 'including', 'like', 'importantly', 'six', 'look', 'along', 'one', 'visually', 'ha', 'wa'])
return result
def filter_stopwords(matrix):
header = matrix[0]
filtered_counts = [[row[col_idx] for col_idx in range(len(row)) if header[col_idx] not in stopwords.words('english')] for row in matrix[1:]]
filtered_header = filter(lambda x: x not in stopwords.words('english'), header)
return [filtered_header] + filtered_counts
def apply_tfidt_transform(matrix):
# print matrix
num_document = float(reduce(lambda x, y: x+1, matrix, 0))
term_counts = [sum(row[col_idx] for row in matrix[1:]) for col_idx in range(len(matrix[0]))]
for row in matrix[1:]:
num_word = float(reduce(lambda x, y: x+y, row))
for col_idx in range(len(row)):
cell = row[col_idx]
if cell != 0:
term_occurrence = term_counts[col_idx]
term_freq = cell / num_word
inverse_doc_freq = np.log(abs(num_document / term_occurrence))
row[col_idx] = term_freq * inverse_doc_freq
return convert_to_positive_integer_matrix(matrix)
def convert_to_positive_integer_matrix(matrix):
result = []
term_counts = [sum(row[col_idx] for row in matrix[1:]) for col_idx in range(len(matrix[0]))]
for row_idx in range(len(matrix)):
new_row = []
for col_idx in range(len(matrix[row_idx])):
if term_counts[col_idx] > 0:
if row_idx == 0:
new_row.append(matrix[row_idx][col_idx])
else:
new_row.append(int(matrix[row_idx][col_idx] * 1000)) # should we actually try to infer this?
result.append(new_row)
return result
def fit_topic_model(tdm, vocab):
print '>> fitting topic model...'
n_doc = len(tdm)
model = lda.LDA(n_topics=min(n_doc/10+2, 5), n_iter=100, random_state=1)
model.fit(tdm)
topic_word = model.topic_word_
doc_topic = model.doc_topic_
# take the top 8 words for each topic
topics = []
n_top_words = 8
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-n_top_words:-1]
# print('Topic {}: {}'.format(i, ' '.join(topic_words)))
topics.append(topic_words.tolist())
# print the topic for each document
doc_topic_map = {}
for n in range(n_doc):
# topic_most_pr = doc_topic[n].argmax()
# print("doc: {} topic: {}\n".format(n, topic_most_pr))
# doc_topic_map[n] = topic_most_pr
doc_topic_map[n] = {}
doc_topic_map[n]['dist'] = doc_topic[n].tolist()
doc_topic_map[n]['max'] = doc_topic[n].argmax()
return topics, doc_topic_map
def run(docs):
tdm = compute_tdm(docs)
return fit_topic_model(np.array(tdm[1:]), tdm[0])
# Print out words with negative weight
#for col_idx in range(len(tdm[0])):
# if tdm[1][col_idx] < 0:
# print tdm[0][col_idx]
def generate_dictionary(texts, name, numDocs):
print '>> generating dictionary...'
dictionary = gensim.corpora.Dictionary(texts)
numDocs = len(texts)
print numDocs
dictionary.filter_extremes(no_below=20, no_above=0.3, keep_n=100000)
dictionary.save(name + '.dict')
print 'dictionary information: '
print dictionary
return dictionary
# Extensions since last run:
# - singularize individual tokens
def docs2corpus(docs, name, isNew):
print '>> converting documents to corpus...'
numDocs = len(docs)
englishStopWords = get_stopwords('english', name)
# texts = [[word for word in doc.lower().split() if word not in englishStopWords and word.isalpha() and len(word) > 1] for doc in docs]
texts = [[singularize(word) for word in doc.lower().split() if singularize(word) not in englishStopWords and word.isalpha() and len(word) > 1] for doc in docs]
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
print len(texts)
if isNew:
dictionary = generate_dictionary(texts, name, numDocs) #uncomment for new corpus
else:
dictionary = gensim.corpora.Dictionary.load(name + '.dict')
corpus = [dictionary.doc2bow(text) for text in texts]
if isNew:
gensim.corpora.MmCorpus.serialize(name + '.mm', corpus) # store to disk, for later use
return corpus, dictionary
def get_document_topics(doc, name):
lda = gensim.models.ldamodel.LdaModel.load(name + '.lda')
englishStopWords = get_stopwords('english', name)
text = [singularize(word) for word in doc.lower().split() if singularize(word) not in englishStopWords and word.isalpha() and len(word) > 1]
dictionary = gensim.corpora.Dictionary.load(name + '.dict')
document_topics = lda.get_document_topics(dictionary.doc2bow(text), minimum_probability=0.05)
if len(document_topics) > 0:
primary_topic_tuple = max(document_topics, key=lambda x:x[1])
topic_terms = lda.show_topic(primary_topic_tuple[0])
print topic_terms
return document_topics, topic_terms
else:
return [], ''
def compute_documents_similarity_sub(target, docs, name):
print 'here'
corpus, dictionary = docs2corpus(docs, name, False)
lda = gensim.models.ldamodel.LdaModel.load(name + '.lda')
# dictionary = gensim.corpora.Dictionary.load('/tmp/' + name + '.dict')
numTokens = len(dictionary.values())
lda_corpus = lda[corpus]
index = MatrixSimilarity(lda_corpus, num_features=numTokens)
print index
sims = index[target]
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
top_documents = sort_sims[:200]
return map(lambda item: item[0], top_documents)
# target is an array of topic distribution
def compute_documents_similarity(target, name):
dictionary = gensim.corpora.Dictionary.load(name + '.dict')
index = MatrixSimilarity.load(name + '.sim')
print index
sims = index[target]
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
top_documents = sort_sims[:200]
return map(lambda item: item[0], top_documents)
def lda2topicMap(lda, corpus, ids, name):
print '>> generating topic map...'
evidenceTopicMap = {}
# dictionary = gensim.corpora.Dictionary.load('/tmp/' + name + '.dict')
i = 0
for c in corpus:
# b = dictionary.doc2bow(d)
evidenceTopicMap[ids[i]] = lda.get_document_topics(c, minimum_probability=0.01)
i += 1
print len(evidenceTopicMap)
return evidenceTopicMap
def create_online_lda(docs, ids, name, numTopics):
corpus, dictionary = docs2corpus(docs, name, True)
print '>> generating online lda model...'
lda = gensim.models.ldamodel.LdaModel(corpus, num_topics=numTopics, id2word=dictionary, passes=10)
print lda
lda.save(name + '.lda')
return lda2topicMap(lda, corpus, ids, name), lda.show_topics(formatted=False)
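# Illustrative usage (not part of the original module; the document list,
# ids, corpus name and topic count are placeholders):
#   topic_map, topics = create_online_lda(abstracts, ids, 'TVCG', 20)
#   # a later session can reuse the persisted .lda/.dict files via
#   # load_online_lda defined below:
#   topic_map, topics = load_online_lda(abstracts, ids, 'TVCG')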
def load_online_lda(docs, ids, name):
print '>> loading online lda model...'
corpus, dictionary = docs2corpus(docs, name, False)
lda = gensim.models.ldamodel.LdaModel.load(name + '.lda')
# return a map from evidence to topic and a list of topics
return lda2topicMap(lda, corpus, ids, name), lda.show_topics(formatted=False)
def get_online_lda_topics(name, numTopics):
lda = gensim.models.ldamodel.LdaModel.load(name + '.lda')
return lda.show_topics(num_topics=numTopics, formatted=False)
def create_similarity_matrix(name):
lda = gensim.models.ldamodel.LdaModel.load(name + '.lda')
corpus = gensim.corpora.MmCorpus(name + '.mm')
lda_corpus = lda[corpus]
dictionary = gensim.corpora.Dictionary.load(name + '.dict')
numTokens = len(dictionary.values())
index = MatrixSimilarity(lda_corpus, num_features=numTokens)
index.save(name + '.sim')
return
| mit |
bmcfee/librosa | librosa/beat.py | 2 | 22985 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Beat and tempo
==============
.. autosummary::
:toctree: generated/
beat_track
plp
tempo
"""
import numpy as np
import scipy
import scipy.stats
from ._cache import cache
from . import core
from . import onset
from . import util
from .feature import tempogram, fourier_tempogram
from .util.exceptions import ParameterError
__all__ = ["beat_track", "tempo", "plp"]
def beat_track(
y=None,
sr=22050,
onset_envelope=None,
hop_length=512,
start_bpm=120.0,
tightness=100,
trim=True,
bpm=None,
prior=None,
units="frames",
):
r"""Dynamic programming beat tracker.
Beats are detected in three stages, following the method of [#]_:
1. Measure onset strength
2. Estimate tempo from onset correlation
3. Pick peaks in onset strength approximately consistent with estimated
tempo
.. [#] Ellis, Daniel PW. "Beat tracking by dynamic programming."
Journal of New Music Research 36.1 (2007): 51-60.
http://labrosa.ee.columbia.edu/projects/beattrack/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of ``y``
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope.
hop_length : int > 0 [scalar]
number of audio samples between successive ``onset_envelope`` values
start_bpm : float > 0 [scalar]
initial guess for the tempo estimator (in beats per minute)
tightness : float [scalar]
tightness of beat distribution around tempo
trim : bool [scalar]
trim leading/trailing beats with weak onsets
bpm : float [scalar]
(optional) If provided, use ``bpm`` as the tempo instead of
estimating it from ``onsets``.
prior : scipy.stats.rv_continuous [optional]
An optional prior distribution over tempo.
If provided, ``start_bpm`` will be ignored.
units : {'frames', 'samples', 'time'}
The units to encode detected beat events in.
By default, 'frames' are used.
Returns
-------
tempo : float [scalar, non-negative]
estimated global tempo (in beats per minute)
beats : np.ndarray [shape=(m,)]
estimated beat event locations in the specified units
(default is frame indices)
.. note::
If no onset strength could be detected, beat_tracker estimates 0 BPM
and returns an empty list.
Raises
------
ParameterError
if neither ``y`` nor ``onset_envelope`` are provided,
or if ``units`` is not one of 'frames', 'samples', or 'time'
See Also
--------
librosa.onset.onset_strength
Examples
--------
Track beats using time series input
>>> y, sr = librosa.load(librosa.ex('choice'), duration=10)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> tempo
135.99917763157896
Print the frames corresponding to beats
>>> beats
array([ 3, 21, 40, 59, 78, 96, 116, 135, 154, 173, 192, 211,
230, 249, 268, 287, 306, 325, 344, 363])
Or print them as timestamps
>>> librosa.frames_to_time(beats, sr=sr)
array([0.07 , 0.488, 0.929, 1.37 , 1.811, 2.229, 2.694, 3.135,
3.576, 4.017, 4.458, 4.899, 5.341, 5.782, 6.223, 6.664,
7.105, 7.546, 7.988, 8.429])
Track beats using a pre-computed onset envelope
>>> onset_env = librosa.onset.onset_strength(y, sr=sr,
... aggregate=np.median)
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
... sr=sr)
>>> tempo
135.99917763157896
>>> beats
array([ 3, 21, 40, 59, 78, 96, 116, 135, 154, 173, 192, 211,
230, 249, 268, 287, 306, 325, 344, 363])
Plot the beat events against the onset strength envelope
>>> import matplotlib.pyplot as plt
>>> hop_length = 512
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> times = librosa.times_like(onset_env, sr=sr, hop_length=hop_length)
>>> M = librosa.feature.melspectrogram(y=y, sr=sr, hop_length=hop_length)
>>> librosa.display.specshow(librosa.power_to_db(M, ref=np.max),
... y_axis='mel', x_axis='time', hop_length=hop_length,
... ax=ax[0])
>>> ax[0].label_outer()
>>> ax[0].set(title='Mel spectrogram')
>>> ax[1].plot(times, librosa.util.normalize(onset_env),
... label='Onset strength')
>>> ax[1].vlines(times[beats], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> ax[1].legend()
"""
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError("y or onset_envelope must be provided")
onset_envelope = onset.onset_strength(
y=y, sr=sr, hop_length=hop_length, aggregate=np.median
)
# Do we have any onsets to grab?
if not onset_envelope.any():
return (0, np.array([], dtype=int))
# Estimate BPM if one was not provided
if bpm is None:
bpm = tempo(
onset_envelope=onset_envelope,
sr=sr,
hop_length=hop_length,
start_bpm=start_bpm,
prior=prior,
)[0]
# Then, run the tracker
beats = __beat_tracker(onset_envelope, bpm, float(sr) / hop_length, tightness, trim)
if units == "frames":
pass
elif units == "samples":
beats = core.frames_to_samples(beats, hop_length=hop_length)
elif units == "time":
beats = core.frames_to_time(beats, hop_length=hop_length, sr=sr)
else:
raise ParameterError("Invalid unit type: {}".format(units))
return (bpm, beats)
@cache(level=30)
def tempo(
y=None,
sr=22050,
onset_envelope=None,
hop_length=512,
start_bpm=120,
std_bpm=1.0,
ac_size=8.0,
max_tempo=320.0,
aggregate=np.mean,
prior=None,
):
"""Estimate the tempo (beats per minute)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of the time series
onset_envelope : np.ndarray [shape=(n,)]
pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length of the time series
start_bpm : float [scalar]
initial guess of the BPM
std_bpm : float > 0 [scalar]
standard deviation of tempo distribution
ac_size : float > 0 [scalar]
length (in seconds) of the auto-correlation window
max_tempo : float > 0 [scalar, optional]
If provided, only estimate tempo below this threshold
aggregate : callable [optional]
Aggregation function for estimating global tempo.
If `None`, then tempo is estimated independently for each frame.
prior : scipy.stats.rv_continuous [optional]
A prior distribution over tempo (in beats per minute).
By default, a pseudo-log-normal prior is used.
If given, ``start_bpm`` and ``std_bpm`` will be ignored.
Returns
-------
tempo : np.ndarray [scalar]
estimated tempo (beats per minute)
See Also
--------
librosa.onset.onset_strength
librosa.feature.tempogram
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Estimate a static tempo
>>> y, sr = librosa.load(librosa.ex('nutcracker'), duration=30)
>>> onset_env = librosa.onset.onset_strength(y, sr=sr)
>>> tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
>>> tempo
array([143.555])
>>> # Or a static tempo with a uniform prior instead
>>> import scipy.stats
>>> prior = scipy.stats.uniform(30, 300) # uniform over 30-300 BPM
>>> utempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr, prior=prior)
>>> utempo
array([161.499])
>>> # Or a dynamic tempo
>>> dtempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr,
... aggregate=None)
>>> dtempo
array([ 89.103, 89.103, 89.103, ..., 123.047, 123.047, 123.047])
>>> # Dynamic tempo with a proper log-normal prior
>>> prior_lognorm = scipy.stats.lognorm(loc=np.log(120), scale=120, s=1)
>>> dtempo_lognorm = librosa.beat.tempo(onset_envelope=onset_env, sr=sr,
... aggregate=None,
... prior=prior_lognorm)
>>> dtempo_lognorm
array([ 89.103, 89.103, 89.103, ..., 123.047, 123.047, 123.047])
Plot the estimated tempo against the onset autocorrelation
>>> import matplotlib.pyplot as plt
>>> # Convert to scalar
>>> tempo = tempo.item()
>>> utempo = utempo.item()
>>> # Compute 2-second windowed autocorrelation
>>> hop_length = 512
>>> ac = librosa.autocorrelate(onset_env, 2 * sr // hop_length)
>>> freqs = librosa.tempo_frequencies(len(ac), sr=sr,
... hop_length=hop_length)
>>> # Plot on a BPM axis. We skip the first (0-lag) bin.
>>> fig, ax = plt.subplots()
>>> ax.semilogx(freqs[1:], librosa.util.normalize(ac)[1:],
... label='Onset autocorrelation', basex=2)
>>> ax.axvline(tempo, 0, 1, alpha=0.75, linestyle='--', color='r',
... label='Tempo (default prior): {:.2f} BPM'.format(tempo))
>>> ax.axvline(utempo, 0, 1, alpha=0.75, linestyle=':', color='g',
... label='Tempo (uniform prior): {:.2f} BPM'.format(utempo))
>>> ax.set(xlabel='Tempo (BPM)', title='Static tempo estimation')
>>> ax.grid(True)
>>> ax.legend()
Plot dynamic tempo estimates over a tempogram
>>> fig, ax = plt.subplots()
>>> tg = librosa.feature.tempogram(onset_envelope=onset_env, sr=sr,
... hop_length=hop_length)
>>> librosa.display.specshow(tg, x_axis='time', y_axis='tempo', cmap='magma', ax=ax)
>>> ax.plot(librosa.times_like(dtempo), dtempo,
... color='c', linewidth=1.5, label='Tempo estimate (default prior)')
>>> ax.plot(librosa.times_like(dtempo_lognorm), dtempo_lognorm,
... color='c', linewidth=1.5, linestyle='--',
... label='Tempo estimate (lognorm prior)')
>>> ax.set(title='Dynamic tempo estimation')
>>> ax.legend()
"""
if start_bpm <= 0:
raise ParameterError("start_bpm must be strictly positive")
win_length = core.time_to_frames(ac_size, sr=sr, hop_length=hop_length).item()
tg = tempogram(
y=y,
sr=sr,
onset_envelope=onset_envelope,
hop_length=hop_length,
win_length=win_length,
)
# Eventually, we want this to work for time-varying tempo
if aggregate is not None:
tg = aggregate(tg, axis=1, keepdims=True)
# Get the BPM values for each bin, skipping the 0-lag bin
bpms = core.tempo_frequencies(tg.shape[0], hop_length=hop_length, sr=sr)
# Weight the autocorrelation by a log-normal distribution
if prior is None:
logprior = -0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm) ** 2
else:
logprior = prior.logpdf(bpms)
# Kill everything above the max tempo
if max_tempo is not None:
max_idx = np.argmax(bpms < max_tempo)
logprior[:max_idx] = -np.inf
# Get the maximum, weighted by the prior
# Using log1p here for numerical stability
best_period = np.argmax(np.log1p(1e6 * tg) + logprior[:, np.newaxis], axis=0)
return bpms[best_period]
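# Worked illustration (added comment, not in the original source): with the
# default prior above, a candidate tempo one octave away from ``start_bpm``
# is down-weighted by exp(-0.5 / std_bpm**2) relative to ``start_bpm`` itself.
#
#   import numpy as np
#   bpms = np.array([60.0, 120.0, 240.0])
#   start_bpm, std_bpm = 120.0, 1.0
#   logprior = -0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm) ** 2
#   # -> array([-0.5,  0. , -0.5]): 60 and 240 BPM receive equal, mild penalties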
def plp(
y=None,
sr=22050,
onset_envelope=None,
hop_length=512,
win_length=384,
tempo_min=30,
tempo_max=300,
prior=None,
):
"""Predominant local pulse (PLP) estimation. [#]_
The PLP method analyzes the onset strength envelope in the frequency domain
to find a locally stable tempo for each frame. These local periodicities
are used to synthesize local half-waves, which are combined such that peaks
coincide with rhythmically salient frames (e.g. onset events on a musical time grid).
The local maxima of the pulse curve can be taken as estimated beat positions.
This method may be preferred over the dynamic programming method of `beat_track`
    when the tempo is expected to vary significantly over time. Additionally,
since `plp` does not require the entire signal to make predictions, it may be
preferable when beat-tracking long recordings in a streaming setting.
.. [#] Grosche, P., & Muller, M. (2011).
"Extracting predominant local pulse information from music recordings."
IEEE Transactions on Audio, Speech, and Language Processing, 19(6), 1688-1701.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of ``y``
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
number of audio samples between successive ``onset_envelope`` values
win_length : int > 0 [scalar]
number of frames to use for tempogram analysis.
By default, 384 frames (at ``sr=22050`` and ``hop_length=512``) corresponds
to about 8.9 seconds.
tempo_min, tempo_max : numbers > 0 [scalar], optional
        Minimum and maximum permissible tempo values. ``tempo_max`` must be
        strictly greater than ``tempo_min``.
Set either (or both) to `None` to disable this constraint.
prior : scipy.stats.rv_continuous [optional]
A prior distribution over tempo (in beats per minute).
By default, a uniform prior over ``[tempo_min, tempo_max]`` is used.
Returns
-------
pulse : np.ndarray, shape=[(n,)]
The estimated pulse curve. Maxima correspond to rhythmically salient
points of time.
See Also
--------
beat_track
librosa.onset.onset_strength
librosa.feature.fourier_tempogram
Examples
--------
Visualize the PLP compared to an onset strength envelope.
Both are normalized here to make comparison easier.
>>> y, sr = librosa.load(librosa.ex('brahms'))
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> pulse = librosa.beat.plp(onset_envelope=onset_env, sr=sr)
>>> # Or compute pulse with an alternate prior, like log-normal
>>> import scipy.stats
>>> prior = scipy.stats.lognorm(loc=np.log(120), scale=120, s=1)
>>> pulse_lognorm = librosa.beat.plp(onset_envelope=onset_env, sr=sr,
... prior=prior)
>>> melspec = librosa.feature.melspectrogram(y=y, sr=sr)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=3, sharex=True)
>>> librosa.display.specshow(librosa.power_to_db(melspec,
... ref=np.max),
... x_axis='time', y_axis='mel', ax=ax[0])
>>> ax[0].set(title='Mel spectrogram')
>>> ax[0].label_outer()
>>> ax[1].plot(librosa.times_like(onset_env),
... librosa.util.normalize(onset_env),
... label='Onset strength')
>>> ax[1].plot(librosa.times_like(pulse),
... librosa.util.normalize(pulse),
... label='Predominant local pulse (PLP)')
>>> ax[1].set(title='Uniform tempo prior [30, 300]')
>>> ax[1].label_outer()
>>> ax[2].plot(librosa.times_like(onset_env),
... librosa.util.normalize(onset_env),
... label='Onset strength')
>>> ax[2].plot(librosa.times_like(pulse_lognorm),
... librosa.util.normalize(pulse_lognorm),
... label='Predominant local pulse (PLP)')
>>> ax[2].set(title='Log-normal tempo prior, mean=120', xlim=[5, 20])
>>> ax[2].legend()
PLP local maxima can be used as estimates of beat positions.
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env)
>>> beats_plp = np.flatnonzero(librosa.util.localmax(pulse))
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
>>> times = librosa.times_like(onset_env, sr=sr)
>>> ax[0].plot(times, librosa.util.normalize(onset_env),
... label='Onset strength')
>>> ax[0].vlines(times[beats], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> ax[0].legend()
>>> ax[0].set(title='librosa.beat.beat_track')
>>> ax[0].label_outer()
>>> # Limit the plot to a 15-second window
>>> times = librosa.times_like(pulse, sr=sr)
>>> ax[1].plot(times, librosa.util.normalize(pulse),
... label='PLP')
>>> ax[1].vlines(times[beats_plp], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='PLP Beats')
>>> ax[1].legend()
>>> ax[1].set(title='librosa.beat.plp', xlim=[5, 20])
>>> ax[1].xaxis.set_major_formatter(librosa.display.TimeFormatter())
"""
# Step 1: get the onset envelope
if onset_envelope is None:
onset_envelope = onset.onset_strength(
y=y, sr=sr, hop_length=hop_length, aggregate=np.median
)
if tempo_min is not None and tempo_max is not None and tempo_max <= tempo_min:
raise ParameterError(
"tempo_max={} must be larger than tempo_min={}".format(tempo_max, tempo_min)
)
# Step 2: get the fourier tempogram
ftgram = fourier_tempogram(
onset_envelope=onset_envelope,
sr=sr,
hop_length=hop_length,
win_length=win_length,
)
# Step 3: pin to the feasible tempo range
tempo_frequencies = core.fourier_tempo_frequencies(
sr=sr, hop_length=hop_length, win_length=win_length
)
if tempo_min is not None:
ftgram[tempo_frequencies < tempo_min] = 0
if tempo_max is not None:
ftgram[tempo_frequencies > tempo_max] = 0
    # Step 4: Discard everything below the peak
ftmag = np.log1p(1e6 * np.abs(ftgram))
if prior is not None:
ftmag += prior.logpdf(tempo_frequencies)[:, np.newaxis]
peak_values = ftmag.max(axis=0, keepdims=True)
ftgram[ftmag < peak_values] = 0
# Normalize to keep only phase information
ftgram /= util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))
# Step 5: invert the Fourier tempogram to get the pulse
pulse = core.istft(ftgram, hop_length=1, length=len(onset_envelope))
# Step 6: retain only the positive part of the pulse cycle
np.clip(pulse, 0, None, pulse)
# Return the normalized pulse
return util.normalize(pulse)
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
"""Internal function that tracks beats in an onset strength envelope.
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
bpm : float [scalar]
tempo estimate
fft_res : float [scalar]
resolution of the fft (sr / hop_length)
tightness: float [scalar]
how closely do we adhere to bpm?
trim : bool [scalar]
trim leading/trailing beats with weak onsets?
Returns
-------
beats : np.ndarray [shape=(n,)]
frame numbers of beat events
"""
if bpm <= 0:
raise ParameterError("bpm must be strictly positive")
# convert bpm to a sample period for searching
period = round(60.0 * fft_res / bpm)
# localscore is a smoothed version of AGC'd onset envelope
localscore = __beat_local_score(onset_envelope, period)
# run the DP
backlink, cumscore = __beat_track_dp(localscore, period, tightness)
# get the position of the last beat
beats = [__last_beat(cumscore)]
# Reconstruct the beat path from backlinks
while backlink[beats[-1]] >= 0:
beats.append(backlink[beats[-1]])
# Put the beats in ascending order
# Convert into an array of frame numbers
beats = np.array(beats[::-1], dtype=int)
# Discard spurious trailing beats
beats = __trim_beats(localscore, beats, trim)
return beats
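# Usage sketch (added for illustration): the DP pipeline above can be exercised
# end-to-end by feeding a synthetic onset envelope to the public ``beat_track``
# function.  The frame rate and impulse spacing are illustrative assumptions.
#
#   import numpy as np
#   import librosa
#
#   sr, hop = 22050, 512                  # ~43 onset frames per second
#   env = np.zeros(430)                   # ~10 seconds worth of frames
#   env[::43] = 1.0                       # one impulse per second (~60 BPM)
#   bpm, beats = librosa.beat.beat_track(onset_envelope=env, sr=sr,
#                                        hop_length=hop)
#   # bpm should come out near 60 and ``beats`` near the impulse frames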
# -- Helper functions for beat tracking
def __normalize_onsets(onsets):
"""Maps onset strength function into the range [0, 1]"""
norm = onsets.std(ddof=1)
if norm > 0:
onsets = onsets / norm
return onsets
def __beat_local_score(onset_envelope, period):
"""Construct the local score for an onset envlope and given period"""
window = np.exp(-0.5 * (np.arange(-period, period + 1) * 32.0 / period) ** 2)
return scipy.signal.convolve(__normalize_onsets(onset_envelope), window, "same")
def __beat_track_dp(localscore, period, tightness):
"""Core dynamic program for beat tracking"""
backlink = np.zeros_like(localscore, dtype=int)
cumscore = np.zeros_like(localscore)
# Search range for previous beat
window = np.arange(-2 * period, -np.round(period / 2) + 1, dtype=int)
# Make a score window, which begins biased toward start_bpm and skewed
if tightness <= 0:
raise ParameterError("tightness must be strictly positive")
txwt = -tightness * (np.log(-window / period) ** 2)
# Are we on the first beat?
first_beat = True
for i, score_i in enumerate(localscore):
# Are we reaching back before time 0?
z_pad = np.maximum(0, min(-window[0], len(window)))
# Search over all possible predecessors
candidates = txwt.copy()
candidates[z_pad:] = candidates[z_pad:] + cumscore[window[z_pad:]]
# Find the best preceding beat
beat_location = np.argmax(candidates)
# Add the local score
cumscore[i] = score_i + candidates[beat_location]
# Special case the first onset. Stop if the localscore is small
if first_beat and score_i < 0.01 * localscore.max():
backlink[i] = -1
else:
backlink[i] = window[beat_location]
first_beat = False
# Update the time range
window = window + 1
return backlink, cumscore
def __last_beat(cumscore):
"""Get the last beat from the cumulative score array"""
maxes = util.localmax(cumscore)
med_score = np.median(cumscore[np.argwhere(maxes)])
# The last of these is the last beat (since score generally increases)
return np.argwhere((cumscore * maxes * 2 > med_score)).max()
def __trim_beats(localscore, beats, trim):
"""Final post-processing: throw out spurious leading/trailing beats"""
smooth_boe = scipy.signal.convolve(localscore[beats], scipy.signal.hann(5), "same")
if trim:
threshold = 0.5 * ((smooth_boe ** 2).mean() ** 0.5)
else:
threshold = 0.0
valid = np.argwhere(smooth_boe > threshold)
return beats[valid.min() : valid.max()]
| isc |
zerothi/sids | sisl/io/siesta/out.py | 1 | 28117 | import os
import numpy as np
from .sile import SileSiesta
from ..sile import add_sile, sile_fh_open
from sisl._internal import set_module
import sisl._array as _a
from sisl import Geometry, Atom, SuperCell
from sisl.utils.cmd import *
from sisl.unit.siesta import unit_convert
__all__ = ['outSileSiesta']
Bohr2Ang = unit_convert('Bohr', 'Ang')
def _ensure_species(species):
""" Ensures that the species list is a list with entries (converts `None` to a list). """
if species is None:
return [Atom(i) for i in range(150)]
return species
@set_module("sisl.io.siesta")
class outSileSiesta(SileSiesta):
""" Output file from Siesta
This enables reading the output quantities from the Siesta output.
"""
_job_completed = False
def readline(self):
line = super().readline()
if 'Job completed' in line:
self._job_completed = True
return line
readline.__doc__ = SileSiesta.readline.__doc__
@property
def job_completed(self):
""" True if the full file has been read and "Job completed" was found. """
return self._job_completed
@sile_fh_open()
def read_species(self):
""" Reads the species from the top of the output file.
If wanting the species this HAS to be the first routine called.
It returns an array of `Atom` objects which may easily be indexed.
"""
line = self.readline()
while not 'Species number:' in line:
line = self.readline()
if line == '':
# We fake the species by direct atomic number
return None
atom = []
while 'Species number:' in line:
ls = line.split()
if ls[3] == 'Atomic':
atom.append(Atom(int(ls[5]), tag=ls[7]))
else:
atom.append(Atom(int(ls[7]), tag=ls[4]))
line = self.readline()
return atom
def _read_supercell_outcell(self):
""" Wrapper for reading the unit-cell from the outcoor block """
# Read until outcell is found
line = self.readline()
while not 'outcell: Unit cell vectors' in line:
line = self.readline()
Ang = 'Ang' in line
# We read the unit-cell vectors (in Ang)
cell = []
line = self.readline()
while len(line.strip()) > 0:
line = line.split()
cell.append([float(x) for x in line[:3]])
line = self.readline()
cell = _a.arrayd(cell)
if not Ang:
cell *= Bohr2Ang
return SuperCell(cell)
def _read_geometry_outcoor(self, line, species=None):
""" Wrapper for reading the geometry as in the outcoor output """
species = _ensure_species(species)
# Now we have outcoor
scaled = 'scaled' in line
fractional = 'fractional' in line
Ang = 'Ang' in line
# Read in data
xyz = []
spec = []
line = self.readline()
while len(line.strip()) > 0:
line = line.split()
xyz.append([float(x) for x in line[:3]])
spec.append(int(line[3]))
line = self.readline()
# in outcoor we know it is always just after
cell = self._read_supercell_outcell()
xyz = _a.arrayd(xyz)
# Now create the geometry
if scaled:
# The output file for siesta does not
# contain the lattice constant.
# So... :(
raise ValueError("Could not read the lattice-constant for the scaled geometry")
elif fractional:
xyz = xyz.dot(cell.cell)
elif not Ang:
xyz *= Bohr2Ang
# Assign the correct species
geom = Geometry(xyz, [species[ia - 1] for ia in spec], sc=cell)
return geom
def _read_geometry_atomic(self, line, species=None):
""" Wrapper for reading the geometry as in the outcoor output """
species = _ensure_species(species)
# Now we have outcoor
Ang = 'Ang' in line
# Read in data
xyz = []
atom = []
line = self.readline()
while len(line.strip()) > 0:
line = line.split()
xyz.append([float(x) for x in line[1:4]])
atom.append(species[int(line[4])-1])
line = self.readline()
# Retrieve the unit-cell (but do not skip file-descriptor position)
# This is because the current unit-cell is not always written.
pos = self.fh.tell()
cell = self._read_supercell_outcell()
self.fh.seek(pos, os.SEEK_SET)
# Convert xyz
xyz = _a.arrayd(xyz)
if not Ang:
xyz *= Bohr2Ang
return Geometry(xyz, atom, sc=cell)
@sile_fh_open()
def read_geometry(self, last=True, all=False):
""" Reads the geometry from the Siesta output file
Parameters
----------
last: bool, optional
only read the last geometry
all: bool, optional
return a list of all geometries (like an MD)
If `True` `last` is ignored
Returns
-------
geometries: list or Geometry or None
if all is False only one geometry will be returned (or None). Otherwise
a list of geometries corresponding to the MD-runs.
"""
# The first thing we do is reading the species.
# Sadly, if this routine is called AFTER some other
# reading process, it may fail...
# Perhaps we should rewind to ensure this...
# But...
species = self.read_species()
if all:
# force last to be false
last = False
def type_coord(line):
if 'outcoor' in line:
return 1
elif 'siesta: Atomic coordinates' in line:
return 2
# Signal not found
return 0
def next_geom():
coord = 0
while coord == 0:
line = self.readline()
if line == '':
return 0, None
coord = type_coord(line)
if coord == 1:
return 1, self._read_geometry_outcoor(line, species)
elif coord == 2:
return 2, self._read_geometry_atomic(line, species)
# Read until a coordinate block is found
geom0 = None
mds = []
if all or last:
# we need to read through all things!
while True:
coord, geom = next_geom()
if coord == 0:
break
if coord == 2:
geom0 = geom
else:
mds.append(geom)
# Since the user requests only the MD geometries
# we only return those
if last:
if len(mds) > 0:
return mds[-1]
return geom0
return mds
# just read the next geometry we hit
return next_geom()[1]
@sile_fh_open()
def read_force(self, last=True, all=False, total=False, max=False):
""" Reads the forces from the Siesta output file
Parameters
----------
last: bool, optional
only read the last force
all: bool, optional
return a list of all forces (like an MD)
If `True` `last` is ignored
total: bool, optional
return the total forces instead of the atomic forces.
max: bool, optional
whether only the maximum atomic force should be returned for each step.
Setting it to `True` is equivalent to `max(outSile.read_force())` in case atomic forces
are written in the output file (`WriteForces .true.` in the fdf file)
Note that this is not the same as doing `max(outSile.read_force(total=True))` since
the forces returned in that case are averages on each axis.
Returns
-------
numpy.ndarray or None
returns ``None`` if the forces are not found in the
output, otherwise forces will be returned
The shape of the array will be different depending on the type of forces requested:
- atomic (default): (nMDsteps, nAtoms, 3)
- total: (nMDsteps, 3)
- max: (nMDsteps, )
If `all` is `False`, the first dimension does not exist. In the case of max, the returned value
will therefore be just a float, not an array.
If `total` and `max` are both `True`, they are returned separately as a tuple: ``(total, max)``
"""
if all:
last = False
# Read until forces are found
def next_force():
line = self.readline()
while not 'siesta: Atomic forces' in line:
line = self.readline()
if line == '':
return None
# Now read data
F = []
line = self.readline()
if 'siesta:' in line:
                # This is the final summary; we don't need to read it, as it does not contain new
                # information, and it may also break things since max forces are not written there
return None
# First, we encounter the atomic forces
while '---' not in line:
line = line.split()
if not (total or max):
F.append([float(x) for x in line[-3:]])
line = self.readline()
if line == '':
break
line = self.readline()
# Then, the total forces
if total:
F = [float(x) for x in line.split()[-3:]]
line = self.readline()
            # And after that we can read the max force
if max and len(line.split()) != 0:
line = self.readline()
maxF = float(line.split()[1])
# In case total is also requested, we are going to store it all in the same variable
# It will be separated later
if total:
F = (*F, maxF)
else:
F = maxF
return _a.arrayd(F)
def return_forces(Fs):
            # Handle cases where we can't know if they are found
if Fs is None: return None
Fs = _a.arrayd(Fs)
if max and total:
return (Fs[..., :-1], Fs[..., -1])
elif max and not all:
return Fs.ravel()[0]
return Fs
if all or last:
# list of all forces
Fs = []
while True:
F = next_force()
if F is None:
break
Fs.append(F)
if last:
return return_forces(Fs[-1])
return return_forces(Fs)
return return_forces(next_force())
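    # Usage sketch (added for illustration; 'RUN.out' is a hypothetical file name):
    #
    #   out = outSileSiesta('RUN.out')
    #   F = out.read_force()                        # (na, 3) forces of the last step
    #   F_md = out.read_force(all=True)             # (nMD, na, 3)
    #   F_tot, F_max = out.read_force(total=True, max=True)   # tuple, see docstring above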
@sile_fh_open()
def read_stress(self, key='static', last=True, all=False):
""" Reads the stresses from the Siesta output file
Parameters
----------
key : {'static', 'total'}
which stress to read from the output.
last: bool, optional
only read the last stress
all: bool, optional
return a list of all stresses (like an MD)
If `True` `last` is ignored
Returns
-------
numpy.ndarray or None
returns ``None`` if the stresses are not found in the
output, otherwise stresses will be returned
"""
if all:
last = False
# Read until stress are found
def next_stress():
line = self.readline()
while not ('siesta: Stress tensor' in line and key in line):
line = self.readline()
if line == '':
return None
# Now read data
S = []
for _ in range(3):
line = self.readline().split()
S.append([float(x) for x in line[-3:]])
return _a.arrayd(S)
# list of all stresses
Ss = []
if all or last:
while True:
S = next_stress()
if S is None:
break
Ss.append(S)
if last:
return Ss[-1]
if self.job_completed and key == 'static':
return Ss[:-1]
return Ss
return next_stress()
@sile_fh_open()
def read_moment(self, orbitals=False, quantity='S', last=True, all=False):
""" Reads the moments from the Siesta output file
These will only be present in case of spin-orbit coupling.
Parameters
----------
orbitals: bool, optional
return a table with orbitally resolved
moments.
quantity: {'S', 'L'}, optional
return the spin-moments or the L moments
last: bool, optional
only read the last force
all: bool, optional
return a list of all forces (like an MD)
If `True` `last` is ignored
"""
# Read until outcoor is found
itt = iter(self)
for line in itt:
if 'moments: Atomic' in line:
break
if not 'moments: Atomic' in line:
return None
# The moments are printed in SPECIES list
next(itt) # empty
next(itt) # empty
na = 0
# Loop the species
tbl = []
# Read the species label
while True:
next(itt) # ""
next(itt) # Atom Orb ...
# Loop atoms in this species list
while True:
line = next(itt)
if line.startswith('Species') or \
line.startswith('--'):
break
line = ' '
atom = []
ia = 0
while not line.startswith('--'):
line = next(itt).split()
if ia == 0:
ia = int(line[0])
elif ia != int(line[0]):
raise ValueError("Error in moments formatting.")
# Track maximum number of atoms
na = max(ia, na)
if quantity == 'S':
atom.append([float(x) for x in line[4:7]])
elif quantity == 'L':
atom.append([float(x) for x in line[7:10]])
line = next(itt).split() # Total ...
if not orbitals:
ia = int(line[0])
if quantity == 'S':
atom.append([float(x) for x in line[4:7]])
elif quantity == 'L':
atom.append([float(x) for x in line[8:11]])
tbl.append((ia, atom))
if line.startswith('--'):
break
# Sort according to the atomic index
        moments = [None] * na
        # Insert each atom's moments at its correct atomic index
for ia, atom in tbl:
moments[ia-1] = atom
if not all:
return _a.arrayd(moments)
return moments
@sile_fh_open()
def read_energy(self):
""" Reads the final energy distribution """
itt = iter(self)
for line in itt:
if 'siesta: Final energy' in line:
break
if not 'siesta: Final energy' in line:
return None
# Read data
line = next(itt)
name_conv = {
"Band Struct.": "Ebs",
"Kinetic": "Ekin",
"Hartree": "Ehartree",
"Eldau": "Eldau",
"Eso": "Eso",
"Ext. field": "EextE",
"Exch.-corr.": "Exc",
"Ion-electron": "Eion_elec",
"Ion-ion": "Eion_ion",
"Ekinion": "Ekin_ion",
"Total": "Etot",
"Fermi": "Ef",
"Enegf": "Enegf",
}
out = {}
while len(line.strip()) > 0:
key, val = line.split("=")
key = key.split(":")[1].strip()
out[name_conv[key]] = float(val)
line = next(itt)
return out
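    # Usage sketch (added for illustration; 'RUN.out' is a hypothetical file name):
    #
    #   E = outSileSiesta('RUN.out').read_energy()
    #   E['Etot'], E['Ef']     # total energy and Fermi level, keys as in name_conv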
def read_data(self, *args, **kwargs):
""" Read specific content in the Siesta out file
The currently implemented things are denoted in
the parameters list.
Note that the returned quantities are in the order
of keywords, so:
>>> read_data(geometry=True, force=True)
<geometry>, <force>
>>> read_data(force=True, geometry=True)
<force>, <geometry>
Parameters
----------
geometry: bool, optional
read geometry, args are passed to `read_geometry`
force: bool, optional
read force, args are passed to `read_force`
stress: bool, optional
read stress, args are passed to `read_stress`
moment: bool, optional
read moment, args are passed to `read_moment` (only for spin-orbit calculations)
energy: bool, optional
read final energies, args are passed to `read_energy`
"""
run = []
# This loops ensures that we preserve the order of arguments
# From Py3.6 and onwards the **kwargs is an OrderedDictionary
for kw in kwargs.keys():
if kw in ['geometry', 'force', 'moment', 'stress', 'energy']:
if kwargs[kw]:
run.append(kw)
# Clean running names
for name in run:
kwargs.pop(name)
val = []
for name in run:
val.append(getattr(self, 'read_{}'.format(name.lower()))(*args, **kwargs))
if len(val) == 0:
return None
elif len(val) == 1:
val = val[0]
return val
@sile_fh_open()
def read_scf(self, key="scf", iscf=-1, imd=None, as_dataframe=False):
r""" Parse SCF information and return a table of SCF information depending on what is requested
Parameters
----------
key : {'scf', 'ts-scf'}
parse SCF information from Siesta SCF or TranSiesta SCF
iscf : int, optional
which SCF cycle should be stored. If ``-1`` only the final SCF step is stored,
for None *all* SCF cycles are returned. When `iscf` values queried are not found they
will be truncated to the nearest SCF step.
imd: int or None, optional
whether only a particular MD step is queried, if None, all MD steps are
parsed and returned. A negative number wraps for the last MD steps.
as_dataframe: boolean, optional
whether the information should be returned as a `pandas.DataFrame`. The advantage of this
            format is that everything is indexed and therefore you know what each value means. You can also
perform operations very easily on a dataframe.
"""
        # These are the properties that are written in SIESTA scf
props = ["iscf", "Eharris", "E_KS", "FreeEng", "dDmax", "Ef", "dHmax"]
if not iscf is None:
if iscf == 0:
raise ValueError(f"{self.__class__.__name__}.read_scf requires iscf argument to *not* be 0!")
if not imd is None:
if imd == 0:
raise ValueError(f"{self.__class__.__name__}.read_scf requires imd argument to *not* be 0!")
def reset_d(d, line):
if line.startswith('SCF cycle converged'):
if len(d['data']) > 0:
d['_final_iscf'] = 1
elif line.startswith('SCF cycle continued'):
d['_final_iscf'] = 0
def common_parse(line, d):
if line.startswith('ts-Vha:'):
d['ts-Vha'] = float(line.split()[1])
elif line.startswith('bulk-bias: |v'):
d['bb-v'] = list(map(float, line.split()[-3:]))
if 'bb-vx' not in props:
props.extend(['BB-vx', 'BB-vy', 'BB-vz'])
elif line.startswith('bulk-bias: {q'):
d['bb-q'] = list(map(float, line.split()[-3:]))
if 'bb-q+' not in props:
props.extend(['BB-q+', 'BB-q-', 'BB-q0'])
else:
return False
return True
if key.lower() == 'scf':
def parse_next(line, d):
line = line.strip().replace('*', '0')
reset_d(d, line)
if common_parse(line, d):
pass
elif line.startswith('scf:'):
d['_found_iscf'] = True
if len(line) == 97:
# this should be for Efup/dwn
# but I think this will fail for as_dataframe (TODO)
data = [int(line[5:9]), float(line[9:25]), float(line[25:41]),
float(line[41:57]), float(line[57:67]), float(line[67:77]),
float(line[77:87]), float(line[87:97])]
elif len(line) == 87:
data = [int(line[5:9]), float(line[9:25]), float(line[25:41]),
float(line[41:57]), float(line[57:67]), float(line[67:77]),
float(line[77:87])]
else:
# Populate DATA by splitting
data = line.split()
data = [int(data[1])] + list(map(float, data[2:]))
d['data'] = data
elif key.lower() == 'ts-scf':
props.append("ts-Vha")
def parse_next(line, d):
line = line.strip().replace('*', '0')
reset_d(d, line)
if common_parse(line, d):
pass
elif line.startswith('ts-q:'):
data = line.split()[1:]
try:
d['ts-q'] = list(map(float, data))
except:
# We are probably reading a device list
# ensure that props are appended
if data[-1] not in props:
props.extend(data)
pass
elif line.startswith('ts-scf:'):
d['_found_iscf'] = True
if len(line) == 100:
data = [int(line[8:12]), float(line[12:28]), float(line[28:44]),
float(line[44:60]), float(line[60:70]), float(line[70:80]),
float(line[80:90]), float(line[90:100]), d['ts-Vha']] + d['ts-q']
elif len(line) == 90:
data = [int(line[8:12]), float(line[12:28]), float(line[28:44]),
float(line[44:60]), float(line[60:70]), float(line[70:80]),
float(line[80:90]), d['ts-Vha']] + d['ts-q']
else:
# Populate DATA by splitting
data = line.split()
data = [int(data[1])] + list(map(float, data[2:])) + [d['ts-Vha']] + d['ts-q']
d['data'] = data
# A temporary dictionary to hold information while reading the output file
d = {
'_found_iscf': False,
'_final_iscf': 0,
'data': [],
}
md = []
scf = []
for line in self:
parse_next(line, d)
if d['_found_iscf']:
d['_found_iscf'] = False
data = d['data']
if len(data) == 0:
continue
if iscf is None or iscf < 0:
scf.append(data)
elif data[0] <= iscf:
# this ensures we will retain the latest iscf in
# case the requested iscf is too big
scf = data
if d['_final_iscf'] == 1:
d['_final_iscf'] = 2
elif d['_final_iscf'] == 2:
d['_final_iscf'] = 0
data = d['data']
if len(data) == 0:
# this traps the case where we read ts-scf
# but find the final scf iteration.
# In that case we don't have any data.
scf = []
continue
if len(scf) == 0:
# this traps cases where final_iscf has
                # been triggered but we haven't collected anything.
# I.e. if key == scf but ts-scf also exists.
continue
# First figure out which iscf we should store
if iscf is None: # or iscf > 0
# scf is correct
pass
elif iscf < 0:
# truncate to 0
scf = scf[max(len(scf) + iscf, 0)]
# Populate md
md.append(np.array(scf))
# Reset SCF data
scf = []
# In case we wanted a given MD step and it's this one, just stop reading
# We are going to return the last MD (see below)
if imd == len(md):
break
# Define the function that is going to convert the information of a MDstep to a Dataset
if as_dataframe:
import pandas as pd
def MDstep_dataframe(scf):
scf = np.atleast_2d(scf)
return pd.DataFrame(
scf[..., 1:],
index=pd.Index(scf[..., 0].ravel().astype(np.int32),
name="iscf"),
columns=props[1:]
)
# Now we know how many MD steps there are
# We will return stuff based on what the user requested
# For pandas DataFrame this will be dependent
# 1. all MD steps requested => imd == index, iscf == column (regardless of iscf==none|int)
# 2. 1 MD step requested => iscf == index
if imd is None:
if as_dataframe:
if len(md) == 0:
# return an empty dataframe (with imd as index)
return pd.DataFrame(index=pd.Index([], name="imd"),
columns=props)
# Regardless of what the user requests we will always have imd == index
# and iscf a column, a user may easily change this.
df = pd.concat(map(MDstep_dataframe, md),
keys=_a.arangei(1, len(md) + 1), names=["imd"])
if iscf is not None:
df.reset_index("iscf", inplace=True)
return df
if iscf is None:
# since each MD step may be a different number of SCF steps
# we cannot convert to a dense array
return md
return np.array(md)
# correct imd to ensure we check against the final size
imd = min(len(md) - 1, max(len(md) + imd, 0))
if len(md) == 0:
# no data collected
if as_dataframe:
return pd.DataFrame(index=pd.Index([], name="iscf"),
columns=props[1:])
return np.array(md[imd])
if imd > len(md):
raise ValueError(f"{self.__class__.__name__}.read_scf could not find requested MD step ({imd}).")
# If a certain imd was requested, get it
# Remember that if imd is positive, we stopped reading at the moment we reached it
scf = np.array(md[imd])
if as_dataframe:
return MDstep_dataframe(scf)
return scf
add_sile('out', outSileSiesta, case=False, gzip=True)
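# Usage sketch (added for illustration; 'RUN.out' is a hypothetical Siesta output
# file).  Thanks to the add_sile registration above, the sile can also be
# resolved by file extension through sisl's generic interface:
#
#   import sisl
#   out = sisl.get_sile('RUN.out')             # -> outSileSiesta
#   geom = out.read_geometry()                 # last geometry of the run
#   scf = out.read_scf(as_dataframe=True)      # SCF convergence table (needs pandas)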
| lgpl-3.0 |
dcprojects/CoolProp | dev/scripts/check_hs.py | 5 | 2567 | from CoolProp.Plots.Plots import hs
import CoolProp
from CoolProp.CoolProp import Props
import matplotlib.pyplot as plt
import numpy as np
Fluid = 'Nitrogen'
fig = plt.figure()
ax = fig.add_subplot(111)
for Fluid in CoolProp.__fluids__:
if Fluid == 'SES36':
continue
ax.cla()
h_crit = Props('H','T',Props(Fluid,"Tcrit"),'D',Props(Fluid,"rhocrit"),Fluid)
s_crit = Props('S','T',Props(Fluid,"Tcrit"),'D',Props(Fluid,"rhocrit"),Fluid)
hL_Tmin = Props('H','T',Props(Fluid,"Tmin"),'Q',0,Fluid)
hV_Tmin = Props('H','T',Props(Fluid,"Tmin"),'Q',1,Fluid)
sL_Tmin = Props('S','T',Props(Fluid,"Tmin"),'Q',0,Fluid)
sV_Tmin = Props('S','T',Props(Fluid,"Tmin"),'Q',1,Fluid)
hs(Fluid, axis = ax)
plt.plot(s_crit,h_crit,'rd')
plt.plot([sL_Tmin,sV_Tmin],[hL_Tmin,hV_Tmin],'--')
plt.gca().axhline(h_crit)
plt.gca().axhline(hV_Tmin)
# Two-Phase
for T in np.linspace(Props(Fluid,"Tmin")+0.1,Props(Fluid,"Tcrit")-1e-3,30):
for Q in np.linspace(0, 1, 30):
try:
h = Props("H",'Q',Q,'T',T,Fluid)
s = Props("S",'Q',Q,'T',T,Fluid)
T = Props("T",'S',s,'H',h,Fluid)
#ax.plot(s,h,'o',mfc='none')
except ValueError as VE:
print T, Q , '|||', '"T","S",',s,',"H",',h,',"'+Fluid+'"', '|||', VE
ax.plot(s,h,'o',mfc='none')
for h in np.linspace(hL_Tmin, hV_Tmin + 1500,100):
for s in np.linspace(sL_Tmin+0.01,sV_Tmin,100):
try:
h_pmax = Props('H','S',s,'P',6*Props(Fluid,'pcrit'),Fluid)
except ValueError:
h_pmax = 0
htriple_s = (hV_Tmin-hL_Tmin)/(sV_Tmin-sL_Tmin)*(s-sL_Tmin)+hL_Tmin
if h < htriple_s or h > h_pmax: continue
try:
T = Props("T",'S',s,'H',h,Fluid)
#ax.plot(s,h,'o',mfc='none',ms=6)
except ValueError:
ax.plot(s,h,'s',mfc='none')
## if Fluid =='Propane':
## ps = Props("P",'T',300,'Q',0,Fluid);
## hL = Props("H",'Q',0,'T',300,Fluid);
## sL = Props("S",'Q',0,'T',300,Fluid);
## h = Props("H",'P',ps,'T',299.5,Fluid);
## s = Props("S",'P',ps,'T',299.5,Fluid);
## print s,h,sL,hL
## plt.plot(s,h,'o')
## plt.plot(sL,hL,'d')
## plt.gca().axvline(s)
## plt.gca().axhline(268.75968916316691)
fig.savefig('figs/'+Fluid+'.png',dpi=200)
fig.savefig('figs/'+Fluid+'.pdf')
| mit |
spallavolu/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
sekikn/incubator-airflow | airflow/providers/presto/hooks/presto.py | 6 | 7381 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from typing import Any, Iterable, Optional
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.hooks.dbapi import DbApiHook
from airflow.models import Connection
class PrestoException(Exception):
"""Presto exception"""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'false':
return False
elif value.lower() == 'true':
return True
return value
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
conn_type = 'presto'
hook_name = 'Presto'
def get_conn(self) -> Connection:
"""Returns a connection object"""
db = self.get_connection(
self.presto_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
)
extra = db.extra_dejson
auth = None
if db.password and extra.get('auth') == 'kerberos':
raise AirflowException("Kerberos authorization doesn't support password.")
elif db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get('auth') == 'kerberos':
auth = prestodb.auth.KerberosAuthentication(
config=extra.get('kerberos__config', os.environ.get('KRB5_CONFIG')),
service_name=extra.get('kerberos__service_name'),
mutual_authentication=_boolify(extra.get('kerberos__mutual_authentication', False)),
force_preemptive=_boolify(extra.get('kerberos__force_preemptive', False)),
hostname_override=extra.get('kerberos__hostname_override'),
sanitize_mutual_error_response=_boolify(
extra.get('kerberos__sanitize_mutual_error_response', True)
),
principal=extra.get('kerberos__principal', conf.get('kerberos', 'principal')),
delegate=_boolify(extra.get('kerberos__delegate', False)),
ca_bundle=extra.get('kerberos__ca_bundle'),
)
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get('source', 'airflow'),
http_scheme=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]
)
if extra.get('verify') is not None:
            # Unfortunately the verify parameter is not yet available via the public API.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra['verify']) # pylint: disable=protected-access
return presto_conn
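    # Illustrative connection setup (added comment; the host, login and extra
    # values below are hypothetical, not taken from this module).  The options
    # read in get_conn()/get_isolation_level() map onto a Connection like:
    #
    #   Connection(
    #       conn_id='presto_default', conn_type='presto',
    #       host='presto-coordinator', port=8080, login='analyst', schema='default',
    #       extra=json.dumps({
    #           "protocol": "https",
    #           "catalog": "hive",
    #           "auth": "kerberos",
    #           "kerberos__service_name": "presto",
    #           "kerberos__principal": "analyst@EXAMPLE.COM",
    #           "verify": "true",
    #           "isolation_level": "READ_COMMITTED",
    #       }),
    #   )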
def get_isolation_level(self) -> Any:
"""Returns an isolation level"""
db = self.get_connection(
self.presto_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
)
isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
@staticmethod
def _strip_sql(sql: str) -> str:
return sql.strip().rstrip(';')
def get_records(self, hql, parameters: Optional[dict] = None):
"""Get a set of records from Presto"""
try:
return super().get_records(self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_first(self, hql: str, parameters: Optional[dict] = None) -> Any:
"""Returns only the first row, regardless of how many rows the query returns."""
try:
return super().get_first(self._strip_sql(hql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_pandas_df(self, hql, parameters=None, **kwargs):
"""Get a pandas dataframe from a sql query."""
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(hql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
def run(
self,
hql,
autocommit: bool = False,
parameters: Optional[dict] = None,
) -> None:
"""Execute the statement against Presto. Can be used to create views."""
return super().run(sql=self._strip_sql(hql), parameters=parameters)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Optional[Iterable[str]] = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
                'Transactions are not enabled in presto connection. '
'Please use the isolation_level property to enable it. '
'Falling back to insert all rows in one transaction.'
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
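    # Usage sketch (added for illustration; the SQL and column names are
    # hypothetical, the table name comes from the class docstring above):
    #
    #   hook = PrestoHook()                      # uses the 'presto_default' connection
    #   df = hook.get_pandas_df('SELECT * FROM airflow.static_babynames LIMIT 10')
    #   hook.insert_rows('airflow.static_babynames', [('Alice', 1)],
    #                    target_fields=['name', 'num'])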
| apache-2.0 |
walterreade/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
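# Added note (not part of the original tests): the linear-kernel equivalence
# asserted above follows from the push-through identity
#     X (X^T X + a I)^(-1) X^T y == K (K + a I)^(-1) y,   with K = X X^T,
# which can be checked directly:
#
#   rng = np.random.RandomState(0)
#   X_, y_ = rng.rand(20, 5), rng.rand(20)
#   a = 1.0
#   w = np.linalg.solve(X_.T.dot(X_) + a * np.eye(5), X_.T.dot(y_))   # primal ridge
#   dual = np.linalg.solve(X_.dot(X_.T) + a * np.eye(20), y_)         # kernel ridge
#   np.allclose(X_.dot(w), X_.dot(X_.T).dot(dual))                    # -> True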
| bsd-3-clause |
rishikksh20/scikit-learn | sklearn/cluster/spectral.py | 19 | 18536 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
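# Usage sketch (added for illustration): ``discretize`` maps a spectral
# embedding of shape (n_samples, n_clusters) to hard labels.  On a toy
# embedding with two well-separated directions it should recover the two
# groups (up to a permutation of the label values):
#
#   rng = np.random.RandomState(0)
#   emb = np.zeros((40, 2))
#   emb[:20, 0] = 1.0 + 0.01 * rng.randn(20)
#   emb[20:, 1] = 1.0 + 0.01 * rng.randn(20)
#   labels = discretize(emb, random_state=0)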
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity='precomputed'``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
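# Illustrative usage sketch (not part of the original scikit-learn module):
# cluster two concentric circles with the estimator above, then run the
# functional ``spectral_clustering`` API on the same affinity matrix. The
# ``make_circles`` demo data and all parameter choices are assumptions.
if __name__ == "__main__":
    from sklearn.datasets import make_circles
    X_demo, _ = make_circles(n_samples=200, factor=0.5, noise=0.05,
                             random_state=0)
    model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                               n_neighbors=10, assign_labels='discretize',
                               random_state=0)
    labels_est = model.fit(X_demo).labels_
    print("estimator cluster sizes: %s" % np.bincount(labels_est))
    # equivalent call through the functional API, reusing the affinity matrix
    labels_fn = spectral_clustering(model.affinity_matrix_, n_clusters=2,
                                    assign_labels='discretize',
                                    random_state=0)
    print("functional API cluster sizes: %s" % np.bincount(labels_fn))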
| bsd-3-clause |
wkfwkf/statsmodels | examples/python/robust_models_0.py | 33 | 2992 |
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
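# Illustrative addition (not in the original example): inspect the robust
# weights from the final RLM fit; the injected outliers should be strongly
# down-weighted (weights well below 1).
print("Five smallest RLM weights:", np.sort(resrlm2.weights)[:5])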
| bsd-3-clause |
jkarnows/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
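# Illustrative addition (not in the original example): report how strongly the
# agglomeration step compressed the feature space.
print("Original number of features: %d" % X.shape[1])
print("Reduced number of features: %d" % X_reduced.shape[1])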
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
heli522/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
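# Illustrative addition (not part of the original test module): a small sketch
# checking the "balanced" heuristic against its closed form
# n_samples / (n_classes * bincount(y)), reusing the helpers imported above.
def test_compute_class_weight_balanced_closed_form():
    y = np.asarray([0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2])
    classes = np.unique(y)
    cw = compute_class_weight("balanced", classes, y)
    expected = len(y) / (len(classes) * np.bincount(y).astype(np.float64))
    assert_array_almost_equal(cw, expected)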
| bsd-3-clause |
arongdari/almc | almc/bayesian_rescal/brescal.py | 1 | 14431 | import logging
import time
import itertools
import numpy as np
import concurrent.futures
from numpy.random import multivariate_normal, gamma
from sklearn.metrics import mean_squared_error
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
_E_ALPHA = 1.
_E_BETA = 1.
_R_ALPHA = 1.
_R_BETA = 1.
_P_SAMPLE_GAP = 5
_P_SAMPLE = False
_PARALLEL = True
_MAX_THREAD = 4
_VAR_E = 1.
_VAR_R = 1.
_VAR_X = 0.01
_DEST = ''
def gen_random_tensor(n_dim, n_entity, n_relation, var_e=1., var_r=1, var_x=0.01):
"""
Generate a random tensor following the generative process of BRESCAL
Parameters
----------
n_dim: int
        latent dimensionality of entities and relations
n_entity: int
number of entities to be generated
n_relation: int
number of relations to be generated
var_e: float
isotropic variance of entity vector
var_r: float
isotropic variance of relation matrix
var_x: float
variance of triple
Returns
-------
array, shape (n_relation, n_entity, n_entity)
"""
e_mean = np.zeros(n_dim)
r_mean = np.zeros(n_dim ** 2)
E = np.random.multivariate_normal(e_mean, np.identity(n_dim) * var_e, size=n_entity)
R = np.zeros([n_relation, n_dim, n_dim])
T = np.zeros([n_relation, n_entity, n_entity])
for k in range(n_relation):
R[k] = np.random.multivariate_normal(r_mean, np.identity(n_dim ** 2) * var_r).reshape(n_dim, n_dim)
for k in range(n_relation):
ERET = np.dot(np.dot(E, R[k]), E.T)
for i, j in itertools.product(range(n_entity), repeat=2):
            T[k, i, j] = np.random.normal(ERET[i, j], np.sqrt(var_x))
return T
class BayesianRescal:
"""
Bayesian RESCAL
    a generalisation of RESCAL (Nickel et al., ICML 2011) with priors over latent features
"""
def __init__(self, n_dim, compute_score=True, controlled_var=False, obs_var=0.01, unobs_var=10.,
eval_fn=mean_squared_error, **kwargs):
"""
Parameters
----------
n_dim: int
latent dimensionality of the model
compute_score: boolean, optional
Compute the joint likelihood of the model
controlled_var: boolean, optional
Controlled variance approach. Place different variances on observed and unobserved triples
obs_var: float, optional
variance of observed triples
unobs_var: float, optional
variance of unobserved triples, only applied when `controlled_var` is True
        eval_fn: `sklearn.metrics.mean_squared_error`
            Evaluate the current fit of the model based on `eval_fn`. By default, the model computes the
            mean squared error between the current observation and the reconstructed tensor.
sample_prior: boolean, optional
If True, sample priors of the variance `var_e` and `var_r`
sample_prior_gap: int
If `sample_prior` is True, then sample priors every `sample_prior_gap` iterations
e_alpha: float
shape parameter of the prior of `var_e`
e_beta: float
scale parameter of the prior of `var_e`
r_alpha: float
shape parameter of the prior of `var_r`
r_beta: float
scale parameter of the prior of `var_r`
parallel: boolean
If True, use multi-thread to sample relations
max_thread: int
maximum number of threads
dest: str
destination path of the output model file
Returns
-------
"""
self.n_dim = n_dim
self.var_e = kwargs.pop('var_e', _VAR_E)
self.var_r = kwargs.pop('var_r', _VAR_R)
self.var_x = kwargs.pop('var_x', _VAR_X)
self.compute_score = compute_score
self.sample_prior = kwargs.pop('sample_prior', _P_SAMPLE)
self.prior_sample_gap = kwargs.pop('prior_sample_gap', _P_SAMPLE_GAP)
self.e_alpha = kwargs.pop('e_alpha', _E_ALPHA)
self.e_beta = kwargs.pop('e_beta', _E_BETA)
self.r_alpha = kwargs.pop('r_alpha', _R_ALPHA)
self.r_beta = kwargs.pop('r_beta', _R_BETA)
self.parallelize = kwargs.pop('parallel', _PARALLEL)
self.max_thread = kwargs.pop('max_thread', _MAX_THREAD)
self.dest = kwargs.pop('dest', _DEST)
self.controlled_var = controlled_var
self.obs_var = obs_var
self.unobs_var = unobs_var
self.eval_fn = eval_fn
def __getstate__(self):
"""Remove `var_X` when save model
"""
d = dict(self.__dict__)
if self.controlled_var:
del d['var_X']
return d
def fit(self, X, max_iter=100):
"""Infer vector and matrix embedding of entities and relations, respectively
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
Full tensor to be factorised.
max_iter: int
maximum number of Gibbs sampling iterations
"""
self.n_relations = X.shape[0]
self.n_entities = X.shape[1]
self.E = np.random.random([self.n_entities, self.n_dim])
self.R = np.random.random([self.n_relations, self.n_dim, self.n_dim])
# for controlled variance
if self.controlled_var:
self.var_X = np.ones_like(X) * self.obs_var
self.var_X[X == 0] = self.unobs_var
self._gibbs(X, max_iter)
if len(self.dest) > 0:
self._save_model()
def _gibbs(self, X, max_iter):
"""Gibbs sampling for entities and relations
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
            Full tensor to be factorised
max_iter:
maximum number of Gibbs sampling iterations
"""
logger.info("[INIT] LL: %.3f | fit: %0.5f", self.score(X), self._compute_fit(X))
for i in range(max_iter):
tic = time.time()
self._sample_entities(X, self.E, self.R)
self._sample_relations(X, self.E, self.R)
if self.sample_prior and (i + 1) % self.prior_sample_gap == 0:
self._sample_prior()
toc = time.time()
if self.compute_score:
_score = self.score(X)
_fit = self._compute_fit(X)
logger.info("[%3d] LL: %.3f | fit: %0.5f | sec: %.3f", i, _score, _fit, (toc - tic))
else:
logger.info("[%3d] sec: %.3f", i, (toc - tic))
def _sample_prior(self):
self._sample_var_r()
self._sample_var_e()
def _sample_var_r(self):
self.var_r = 1. / gamma(0.5 * self.n_relations * self.n_dim * self.n_dim + self.r_alpha,
1. / (0.5 * np.sum(self.R ** 2) + self.r_beta))
logger.debug("Sampled var_r %.3f", self.var_r)
def _sample_var_e(self):
self.var_e = 1. / gamma(0.5 * self.n_entities * self.n_dim + self.e_alpha,
1. / (0.5 * np.sum(self.E ** 2) + self.e_beta))
logger.debug("Sampled var_e %.3f", self.var_e)
def _sample_entities(self, X, E, R):
"""Sample entity vectors
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
full tensor
E: ndarray, shape (n_entities, n_dim)
R: ndarray, shape (n_relations, n_dim, n_dim)
Returns
-------
E: ndarray, shape (n_entities, n_dim)
returns newly sampled entities
"""
for i in range(self.n_entities):
self._sample_entity(X, E, R, i)
return E
def _sample_entity(self, X, E, R, i):
E[i] *= 0
_lambda = np.zeros([self.n_dim, self.n_dim])
xi = np.zeros(self.n_dim)
if self.controlled_var:
for k in range(self.n_relations):
tmp = np.dot(R[k], E.T) # D x E
tmp2 = np.dot(R[k].T, E.T)
_lambda += np.dot(tmp * (1. / self.var_X[k, i, :]), tmp.T) \
+ np.dot(tmp2 * (1. / self.var_X[k, :, i]), tmp2.T)
xi += np.sum((1. / self.var_X[k, i, :]) * X[k, i, :] * tmp, 1) \
+ np.sum((1. / self.var_X[k, :, i]) * X[k, :, i] * tmp2, 1)
_lambda += (1. / self.var_e) * np.identity(self.n_dim)
inv_lambda = np.linalg.inv(_lambda)
mu = np.dot(inv_lambda, xi)
else:
for k in range(self.n_relations):
tmp = np.dot(R[k], E.T) # D x E
tmp2 = np.dot(R[k].T, E.T)
_lambda += np.dot(tmp, tmp.T) + np.dot(tmp2, tmp2.T)
xi += np.sum(X[k, i, :] * tmp, 1) + np.sum(X[k, :, i] * tmp2, 1)
xi *= (1. / self.var_x)
_lambda *= 1. / self.var_x
_lambda += (1. / self.var_e) * np.identity(self.n_dim)
inv_lambda = np.linalg.inv(_lambda)
mu = np.dot(inv_lambda, xi)
E[i] = multivariate_normal(mu, inv_lambda)
return E[i]
def _sample_relation(self, X, E, R, k, EXE, inv_lambda=None):
"""Sample relation matrix
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
E: ndarray, shape (n_entities, n_dim)
R: ndarray, shape (n_relations, n_dim, n_dim)
k: int
relation to be sampled
EXE: ndarray, shape (n_entities * n_entities, n_dim * n_dim)
kronecker product between entities
inv_lambda:ndarray, shape (n_dim * n_dim, n_dim * n_dim)
inverse of covariance matrix
Returns
-------
R[k]: ndarray, shape (n_dim, n_dim)
returns newly sampled relation matrix R
k: int
index of sampled relation
"""
if not self.controlled_var:
xi = np.sum(EXE * X[k].flatten()[:, np.newaxis], 0)
mu = (1. / self.var_x) * np.dot(inv_lambda, xi)
R[k] = multivariate_normal(mu, inv_lambda).reshape([self.n_dim, self.n_dim])
else:
tmp = EXE * (1. / self.var_X[k, :, :].flatten()[:, np.newaxis])
_lambda = np.dot(tmp.T, EXE)
_lambda += (1. / self.var_r) * np.identity(self.n_dim ** 2)
inv_lambda = np.linalg.inv(_lambda)
xi = np.sum(EXE * X[k].flatten()[:, np.newaxis] * (1. / self.var_X[k, :, :].flatten()[:, np.newaxis]), 0)
mu = np.dot(inv_lambda, xi)
R[k] = multivariate_normal(mu, inv_lambda).reshape([self.n_dim, self.n_dim])
return R[k], k
def _sample_relations(self, X, E, R):
"""Sample relation matrices
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
E: ndarray, shape (n_entities, n_dim)
R: ndarray, shape (n_relations, n_dim, n_dim)
Returns
-------
R: ndarray, shape (n_relations, n_dim, n_dim)
returns newly sampled relation matrices
"""
EXE = np.kron(E, E)
if not self.controlled_var:
_lambda = np.dot(EXE.T, EXE) # D^2 x D^2
_lambda *= (1. / self.var_x)
_lambda += (1. / self.var_r) * np.identity(self.n_dim ** 2)
inv_lambda = np.linalg.inv(_lambda)
if self.parallelize:
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_thread) as executor:
fs = [executor.submit(self._sample_relation, X, E, R, k, EXE, inv_lambda) for k in
range(self.n_relations)]
concurrent.futures.wait(fs)
else:
[self._sample_relation(X, E, R, k, EXE, inv_lambda) for k in range(self.n_relations)]
else:
if self.parallelize:
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_thread) as executor:
fs = [executor.submit(self._sample_relation, X, E, R, k, EXE) for k in range(self.n_relations)]
concurrent.futures.wait(fs)
else:
[self._sample_relation(X, E, R, k, EXE) for k in range(self.n_relations)]
return R
def _reconstruct(self):
""" Reconstruct tensor by the current model parameters
Returns
-------
_X: ndarray, shape (n_relations, n_entities, n_entities)
Reconstructed tensor
"""
_X = np.zeros([self.n_relations, self.n_entities, self.n_entities])
for k in range(self.n_relations):
_X[k] = np.dot(np.dot(self.E, self.R[k]), self.E.T)
return _X
def score(self, X):
from scipy.stats import norm, multivariate_normal
if not hasattr(self, 'n_relations'):
            self.n_relations, self.n_entities, _ = X.shape
score = 0
for k in range(self.n_relations):
mean = np.dot(np.dot(self.E, self.R[k]), self.E.T)
if self.controlled_var:
score += np.sum(norm.logpdf(X[k].flatten(), mean.flatten(), np.sqrt(self.var_X[k].flatten())))
else:
score += np.sum(norm.logpdf(X[k].flatten(), mean.flatten(), np.sqrt(self.var_x)))
score += np.sum(norm.logpdf(self.R[k].flatten(), 0, np.sqrt(self.var_r)))
for i in range(self.n_entities):
score += multivariate_normal.logpdf(self.E[i], np.zeros(self.n_dim), np.identity(self.n_dim) * self.var_e)
if self.sample_prior:
score += (self.e_alpha - 1.) * np.log(self.var_e) - self.e_beta * self.var_e
score += (self.r_alpha - 1.) * np.log(self.var_r) - self.r_beta * self.var_r
return score
def _compute_fit(self, X):
"""Compute reconstruction error
Parameters
----------
X: ndarray, shape (n_relations, n_entities, n_entities)
Returns
-------
_fit: float
returns the evaluation error between reconstructed tensor and `X`
based on `eval_fn`
"""
_X = self._reconstruct()
_fit = self.eval_fn(X.flatten(), _X.flatten())
return _fit
def _save_model(self):
"""Save the current parameters of the model
"""
import pickle
with open(self.dest, 'wb') as f:
pickle.dump(self, f)
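# Illustrative sketch (not part of the original module): fit BayesianRescal on
# a tiny synthetic tensor drawn from gen_random_tensor. Dimensions are kept
# very small so the Gibbs sampler finishes in a few seconds.
if __name__ == '__main__':
    T = gen_random_tensor(n_dim=2, n_entity=5, n_relation=2)
    model = BayesianRescal(n_dim=2, compute_score=False, parallel=False)
    model.fit(T, max_iter=5)
    print('reconstruction error: %.5f' % model._compute_fit(T))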
| gpl-2.0 |
KasperPRasmussen/bokeh | examples/charts/file/scatter_multi.py | 4 | 2207 |
import pandas as pd
from bokeh.charts import Scatter, output_file, show, vplot, hplot, defaults
from bokeh.charts.operations import blend
from bokeh.charts.utils import df_from_json
from bokeh.sampledata.autompg import autompg as df
from bokeh.sampledata.iris import flowers
from bokeh.sampledata.olympics2014 import data
defaults.plot_width = 450
defaults.plot_height = 400
scatter0 = Scatter(
df, x='mpg', title="x='mpg'", xlabel="Miles Per Gallon")
scatter1 = Scatter(
df, x='mpg', y='hp', title="x='mpg', y='hp'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
scatter2 = Scatter(
df, x='mpg', y='hp', color='cyl', title="x='mpg', y='hp', color='cyl'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
scatter3 = Scatter(
df, x='mpg', y='hp', color='origin', title="x='mpg', y='hp', color='origin', "
"with tooltips",
xlabel="Miles Per Gallon", ylabel="Horsepower",
legend='top_right', tooltips=[('origin', "@origin")])
scatter4 = Scatter(
df, x='mpg', y='hp', color='cyl', marker='origin', title="x='mpg', y='hp', color='cyl', marker='origin'",
xlabel="Miles Per Gallon", ylabel="Horsepower", legend='top_right')
# Example with nested json/dict like data, which has been pre-aggregated and pivoted
df2 = df_from_json(data)
df2 = df2.sort('total', ascending=False)
df2 = df2.head(10)
df2 = pd.melt(df2, id_vars=['abbr', 'name'])
scatter5 = Scatter(
df2, x='value', y='name', color='variable', title="x='value', y='name', color='variable'",
xlabel="Medals", ylabel="Top 10 Countries", legend='bottom_right')
scatter6 = Scatter(flowers, x=blend('petal_length', 'sepal_length', name='length'),
y=blend('petal_width', 'sepal_width', name='width'), color='species',
title='x=petal_length+sepal_length, y=petal_width+sepal_width, color=species',
legend='top_right')
scatter6.title_text_font_size = '10pt'
output_file("scatter_multi.html", title="scatter_multi.py example")
show(vplot(
hplot(scatter0, scatter1),
hplot(scatter2, scatter3),
hplot(scatter4, scatter5),
hplot(scatter6)
))
| bsd-3-clause |
wasade/networkx | examples/algorithms/blockmodel.py | 12 | 3014 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <[email protected]>',
'Aric Hagberg <[email protected]>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
    # This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H = next(nx.connected_component_subgraphs(G))
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
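    # Illustrative addition (not in the original example): print a short
    # summary of the induced block model.
    print("Number of blocks: %d" % BM.number_of_nodes())
    print("Block sizes: %s" % [BM.node[n]['nnodes'] for n in BM.nodes()])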
| bsd-3-clause |
simon-pepin/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
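# Illustrative addition (not in the original script): attach a legend to the
# two fitted curves (the first two Line2D artists on the axes).
ax = plt.gca()
plt.legend(ax.lines[:2],
           ('Logistic Regression Model', 'Linear Regression Model'),
           loc="lower right", fontsize='small')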
plt.show()
| bsd-3-clause |
larsoner/mne-python | mne/decoding/csp.py | 3 | 34992 | # -*- coding: utf-8 -*-
# Authors: Romain Trachel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Alexandre Barachant <[email protected]>
# Clemens Brunner <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from scipy import linalg
from .base import BaseEstimator
from .mixin import TransformerMixin
from ..cov import _regularized_covariance
from ..utils import fill_doc, _check_option, _validate_type
@fill_doc
class CSP(TransformerMixin, BaseEstimator):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This class can be used as a supervised decomposition to estimate spatial
filters for feature extraction. CSP in the context of EEG was first
described in :footcite:`KolesEtAl1990`; a comprehensive tutorial on CSP can
be found in :footcite:`BlankertzEtAl2008`. Multi-class solving is
implemented from :footcite:`Grosse-WentrupBuss2008`.
Parameters
----------
n_components : int (default 4)
The number of components to decompose M/EEG signals. This number should
be set by cross-validation.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow regularization
for covariance estimation. If float (between 0 and 1), shrinkage is
used. For str values, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
log : None | bool (default None)
If ``transform_into`` equals ``'average_power'`` and ``log`` is None or
True, then apply a log transform to standardize features, else features
are z-scored. If ``transform_into`` is ``'csp_space'``, ``log`` must be
None.
cov_est : 'concat' | 'epoch' (default 'concat')
If ``'concat'``, covariance matrices are estimated on concatenated
epochs for each class. If ``'epoch'``, covariance matrices are
estimated on each epoch separately and then averaged over each class.
transform_into : 'average_power' | 'csp_space' (default 'average_power')
If 'average_power' then ``self.transform`` will return the average
power of each spatial filter. If ``'csp_space'``, ``self.transform``
will return the data in CSP space.
norm_trace : bool (default False)
Normalize class covariance by its trace. Trace normalization is a step
of the original CSP algorithm :footcite:`KolesEtAl1990` to eliminate
magnitude variations in the EEG between individuals. It is not applied
in more recent work :footcite:`BlankertzEtAl2008`,
:footcite:`Grosse-WentrupBuss2008` and can have a negative impact on
pattern order.
cov_method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
%(rank_None)s
.. versionadded:: 0.17
component_order : 'mutual_info' | 'alternate' (default 'mutual_info')
If ``'mutual_info'`` order components by decreasing mutual information
(in the two-class case this uses a simplification which orders
components by decreasing absolute deviation of the eigenvalues from 0.5
:footcite:`BarachantEtAl2010`). For the two-class case, ``'alternate'``
orders components by starting with the largest eigenvalue, followed by
the smallest, the second-to-largest, the second-to-smallest, and so on
:footcite:`BlankertzEtAl2008`.
.. versionadded:: 0.21
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
mean_ : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, SPoC
References
----------
.. footbibliography::
"""
def __init__(self, n_components=4, reg=None, log=None, cov_est='concat',
transform_into='average_power', norm_trace=False,
cov_method_params=None, rank=None,
component_order='mutual_info'):
# Init default CSP
if not isinstance(n_components, int):
raise ValueError('n_components must be an integer.')
self.n_components = n_components
self.rank = rank
self.reg = reg
# Init default cov_est
if not (cov_est == "concat" or cov_est == "epoch"):
raise ValueError("unknown covariance estimation method")
self.cov_est = cov_est
# Init default transform_into
self.transform_into = _check_option('transform_into', transform_into,
['average_power', 'csp_space'])
# Init default log
if transform_into == 'average_power':
if log is not None and not isinstance(log, bool):
raise ValueError('log must be a boolean if transform_into == '
'"average_power".')
else:
if log is not None:
raise ValueError('log must be a None if transform_into == '
'"csp_space".')
self.log = log
_validate_type(norm_trace, bool, 'norm_trace')
self.norm_trace = norm_trace
self.cov_method_params = cov_method_params
self.component_order = _check_option('component_order',
component_order,
('mutual_info', 'alternate'))
def _check_Xy(self, X, y=None):
"""Check input data."""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)."
% type(X))
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def fit(self, X, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the CSP.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
self._check_Xy(X, y)
self._classes = np.unique(y)
n_classes = len(self._classes)
if n_classes < 2:
raise ValueError("n_classes must be >= 2.")
if n_classes > 2 and self.component_order == 'alternate':
raise ValueError("component_order='alternate' requires two "
"classes, but data contains {} classes; use "
"component_order='mutual_info' "
"instead.".format(n_classes))
covs, sample_weights = self._compute_covariance_matrices(X, y)
eigen_vectors, eigen_values = self._decompose_covs(covs,
sample_weights)
ix = self._order_components(covs, sample_weights, eigen_vectors,
eigen_values, self.component_order)
eigen_vectors = eigen_vectors[:, ix]
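        # rows of ``filters_`` are the spatial filters (unmixing matrix); the
        # corresponding patterns (forward model) are recovered via the
        # pseudo-inverse, as is standard for linear decompositions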
self.filters_ = eigen_vectors.T
self.patterns_ = linalg.pinv2(eigen_vectors)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean power)
X = (X ** 2).mean(axis=2)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
If self.transform_into == 'average_power' then returns the power of
CSP features averaged over time and shape (n_epochs, n_sources)
If self.transform_into == 'csp_space' then returns the data in CSP
space and shape is (n_epochs, n_sources, n_times).
"""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)." % type(X))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
if self.transform_into == 'average_power':
X = (X ** 2).mean(axis=2)
log = True if self.log is None else self.log
if log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
@fill_doc
def plot_patterns(self, info, components=None, ch_type=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scalings=None, units='a.u.', res=64,
size=1, cbar_fmt='%3.1f', name_format='CSP%01d',
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None,
sphere=None):
"""Plot topographic patterns of components.
The patterns explain how the measured data was generated from the
neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of float | None
The patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, default np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scalings : dict | float | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
units : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%%01d".
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
# the call plot_topomap
return patterns.plot_topomap(
times=components, ch_type=ch_type,
vmin=vmin, vmax=vmax, cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scalings=scalings, units=units, time_unit='s',
time_format=name_format, size=size, show_names=show_names,
title=title, mask_params=mask_params, mask=mask, outlines=outlines,
contours=contours, image_interp=image_interp, show=show,
average=average, sphere=sphere)
@fill_doc
def plot_filters(self, info, components=None, ch_type=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scalings=None, units='a.u.', res=64,
size=1, cbar_fmt='%3.1f', name_format='CSP%01d',
show=True, show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None):
"""Plot topographic filters of components.
The filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of float | None
            The filters to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scalings : dict | float | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
units : dict | str | None
            The unit of the channel type used for colorbar label. If
            ``scalings`` is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%%01d".
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_.T, info, tmin=0)
        # then call plot_topomap
return filters.plot_topomap(
times=components, ch_type=ch_type, vmin=vmin,
vmax=vmax, cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors, scalings=scalings, units=units,
time_unit='s', time_format=name_format, size=size,
show_names=show_names, title=title, mask_params=mask_params,
mask=mask, outlines=outlines, contours=contours,
image_interp=image_interp, show=show, average=average)
def _compute_covariance_matrices(self, X, y):
_, n_channels, _ = X.shape
if self.cov_est == "concat":
cov_estimator = self._concat_cov
elif self.cov_est == "epoch":
cov_estimator = self._epoch_cov
covs = []
sample_weights = []
for this_class in self._classes:
cov, weight = cov_estimator(X[y == this_class])
if self.norm_trace:
cov /= np.trace(cov)
covs.append(cov)
sample_weights.append(weight)
return np.stack(covs), np.array(sample_weights)
def _concat_cov(self, x_class):
"""Concatenate epochs before computing the covariance."""
_, n_channels, _ = x_class.shape
x_class = np.transpose(x_class, [1, 0, 2])
x_class = x_class.reshape(n_channels, -1)
cov = _regularized_covariance(
x_class, reg=self.reg, method_params=self.cov_method_params,
rank=self.rank)
weight = x_class.shape[0]
return cov, weight
def _epoch_cov(self, x_class):
"""Mean of per-epoch covariances."""
cov = sum(_regularized_covariance(
this_X, reg=self.reg,
method_params=self.cov_method_params,
rank=self.rank) for this_X in x_class)
cov /= len(x_class)
weight = len(x_class)
return cov, weight
def _decompose_covs(self, covs, sample_weights):
n_classes = len(covs)
if n_classes == 2:
eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0))
else:
# The multiclass case is adapted from
# http://github.com/alexandrebarachant/pyRiemann
eigen_vectors, D = _ajd_pham(covs)
eigen_vectors = self._normalize_eigenvectors(eigen_vectors.T, covs,
sample_weights)
eigen_values = None
return eigen_vectors, eigen_values
def _compute_mutual_info(self, covs, sample_weights, eigen_vectors):
class_probas = sample_weights / sample_weights.sum()
mutual_info = []
for jj in range(eigen_vectors.shape[1]):
aa, bb = 0, 0
for (cov, prob) in zip(covs, class_probas):
tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov),
eigen_vectors[:, jj])
aa += prob * np.log(np.sqrt(tmp))
bb += prob * (tmp ** 2 - 1)
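            # This value approximates the mutual information carried by the
            # power of component jj; _order_components ranks components by it.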
mi = - (aa + (3.0 / 16) * (bb ** 2))
mutual_info.append(mi)
return mutual_info
def _normalize_eigenvectors(self, eigen_vectors, covs, sample_weights):
# Here we apply an euclidean mean. See pyRiemann for other metrics
mean_cov = np.average(covs, axis=0, weights=sample_weights)
for ii in range(eigen_vectors.shape[1]):
tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov),
eigen_vectors[:, ii])
eigen_vectors[:, ii] /= np.sqrt(tmp)
return eigen_vectors
def _order_components(self, covs, sample_weights, eigen_vectors,
eigen_values, component_order):
n_classes = len(self._classes)
if component_order == 'mutual_info' and n_classes > 2:
mutual_info = self._compute_mutual_info(covs, sample_weights,
eigen_vectors)
ix = np.argsort(mutual_info)[::-1]
elif component_order == 'mutual_info' and n_classes == 2:
ix = np.argsort(np.abs(eigen_values - 0.5))[::-1]
elif component_order == 'alternate' and n_classes == 2:
i = np.argsort(eigen_values)
ix = np.empty_like(i)
ix[1::2] = i[:len(i) // 2]
ix[0::2] = i[len(i) // 2:][::-1]
return ix
def _ajd_pham(X, eps=1e-6, max_iter=15):
"""Approximate joint diagonalization based on Pham's algorithm.
    This is a direct implementation of Pham's AJD algorithm [1].
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_channels)
A set of covariance matrices to diagonalize.
eps : float, default 1e-6
The tolerance for stopping criterion.
    max_iter : int, default 15
        The maximum number of iterations to reach convergence.
Returns
-------
V : ndarray, shape (n_channels, n_channels)
The diagonalizer.
D : ndarray, shape (n_epochs, n_channels, n_channels)
The set of quasi diagonal matrices.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive
definite Hermitian matrices." SIAM Journal on Matrix Analysis and
Applications 22, no. 4 (2001): 1136-1152.
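    Examples
    --------
    A hypothetical sketch only; two random SPD matrices are jointly
    diagonalized.
    >>> rng = np.random.RandomState(42)                         # doctest: +SKIP
    >>> rnd = rng.randn(2, 4, 4)                                # doctest: +SKIP
    >>> covs = np.array([r.dot(r.T) + np.eye(4) for r in rnd])  # doctest: +SKIP
    >>> V, D = _ajd_pham(covs)                                  # doctest: +SKIP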
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(max_iter):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V, D
@fill_doc
class SPoC(CSP):
"""Implementation of the SPoC spatial filtering.
    Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows the
    extraction of spatial filters and patterns by using a target (continuous)
    variable in the decomposition process, giving preference to components
    whose power correlates with the target variable.
SPoC can be seen as an extension of the CSP driven by a continuous
variable rather than a discrete variable. Typical applications include
extraction of motor patterns using EMG power or audio patterns using sound
envelope.
Parameters
----------
n_components : int
The number of components to decompose M/EEG signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
        For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
log : None | bool (default None)
If transform_into == 'average_power' and log is None or True, then
applies a log transform to standardize the features, else the features
are z-scored. If transform_into == 'csp_space', then log must be None.
transform_into : {'average_power', 'csp_space'}
If 'average_power' then self.transform will return the average power of
each spatial filter. If 'csp_space' self.transform will return the data
in CSP space. Defaults to 'average_power'.
cov_method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
%(rank_None)s
.. versionadded:: 0.17
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the SPoC spatial filters, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the SPoC spatial patterns, else None.
mean_ : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, CSP
References
----------
.. footbibliography::
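    Examples
    --------
    A minimal, hypothetical sketch; ``epochs_data`` with shape
    (n_epochs, n_channels, n_times) and a continuous ``target`` vector are
    assumed to exist and are not defined here.
    >>> spoc = SPoC(n_components=2)          # doctest: +SKIP
    >>> spoc.fit(epochs_data, target)        # doctest: +SKIP
    >>> power = spoc.transform(epochs_data)  # doctest: +SKIP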
"""
def __init__(self, n_components=4, reg=None, log=None,
transform_into='average_power', cov_method_params=None,
rank=None):
"""Init of SPoC."""
super(SPoC, self).__init__(n_components=n_components, reg=reg, log=log,
cov_est="epoch", norm_trace=False,
transform_into=transform_into, rank=rank,
cov_method_params=cov_method_params)
        # Covariance estimation has to be done on the single-epoch level,
# unlike CSP where covariance estimation can also be achieved through
# concatenation of all epochs from the same class.
delattr(self, 'cov_est')
delattr(self, 'norm_trace')
def fit(self, X, y):
"""Estimate the SPoC decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the SPoC.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of SPoC
Returns the modified instance.
"""
self._check_Xy(X, y)
if len(np.unique(y)) < 2:
raise ValueError("y must have at least two distinct values.")
# The following code is directly copied from pyRiemann
# Normalize target variable
target = y.astype(np.float64)
target -= target.mean()
target /= target.std()
n_epochs, n_channels = X.shape[:2]
# Estimate single trial covariance
covs = np.empty((n_epochs, n_channels, n_channels))
for ii, epoch in enumerate(X):
covs[ii] = _regularized_covariance(
epoch, reg=self.reg, method_params=self.cov_method_params,
rank=self.rank)
C = covs.mean(0)
Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0)
# solve eigenvalue decomposition
evals, evecs = linalg.eigh(Cz, C)
evals = evals.real
evecs = evecs.real
# sort vectors
ix = np.argsort(np.abs(evals))[::-1]
# sort eigenvectors
evecs = evecs[:, ix].T
# spatial patterns
self.patterns_ = linalg.pinv(evecs).T # n_channels x n_channels
self.filters_ = evecs # n_channels x n_channels
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X):
"""Estimate epochs sources given the SPoC filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
If self.transform_into == 'average_power' then returns the power of
CSP features averaged over time and shape (n_epochs, n_sources)
If self.transform_into == 'csp_space' then returns the data in CSP
space and shape is (n_epochs, n_sources, n_times).
"""
return super(SPoC, self).transform(X)
| bsd-3-clause |
vsoch/mriqc | mriqc/motion.py | 1 | 2214 | '''
motion.py: part of mriqc package
'''
import numpy as np
import pylab as plt
import seaborn as sns
from mriqc.misc import plot_vline
from matplotlib.figure import Figure
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
from matplotlib.gridspec import GridSpec
def calc_frame_dispalcement(realignment_parameters_file):
lines = open(realignment_parameters_file, 'r').readlines()
rows = [[float(x) for x in line.split()] for line in lines]
cols = np.array([list(col) for col in zip(*rows)])
translations = np.transpose(np.abs(np.diff(cols[0:3, :])))
rotations = np.transpose(np.abs(np.diff(cols[3:6, :])))
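    # Framewise displacement: absolute frame-to-frame translations plus
    # rotations converted to millimetres of arc, assuming a head radius of
    # roughly 50 mm (the 50 * pi / 180 factor treats the rotations as degrees)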
    FD_power = np.sum(translations, axis=1) + (50 * 3.141 / 180) * np.sum(rotations, axis=1)
#FD is zero for the first time point
FD_power = np.insert(FD_power, 0, 0)
return FD_power
def get_mean_frame_displacement_disttribution(realignment_parameters_files):
mean_FDs = []
max_FDs = []
for realignment_parameters_file in realignment_parameters_files:
FD_power = calc_frame_dispalcement(realignment_parameters_file)
mean_FDs.append(FD_power.mean())
max_FDs.append(FD_power.max())
return mean_FDs, max_FDs
def plot_frame_displacement(realignment_parameters_file, mean_FD_distribution=None, figsize=(11.7,8.3)):
FD_power = calc_frame_dispalcement(realignment_parameters_file)
fig = Figure(figsize=figsize)
FigureCanvas(fig)
if mean_FD_distribution:
grid = GridSpec(2, 4)
else:
grid = GridSpec(1, 4)
ax = fig.add_subplot(grid[0,:-1])
ax.plot(FD_power)
ax.set_xlim((0, len(FD_power)))
ax.set_ylabel("Frame Displacement [mm]")
ax.set_xlabel("Frame number")
ylim = ax.get_ylim()
ax = fig.add_subplot(grid[0,-1])
sns.distplot(FD_power, vertical=True, ax=ax)
ax.set_ylim(ylim)
if mean_FD_distribution:
ax = fig.add_subplot(grid[1,:])
sns.distplot(mean_FD_distribution, ax=ax)
ax.set_xlabel("Mean Frame Dispalcement (over all subjects) [mm]")
MeanFD = FD_power.mean()
label = "MeanFD = %g"%MeanFD
plot_vline(MeanFD, label, ax=ax)
return fig
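if __name__ == "__main__":
    # Illustrative usage sketch only; the realignment-parameter paths below
    # are hypothetical placeholders for real SPM/FSL motion parameter files.
    import sys
    rp_files = sys.argv[1:] or ["rp_sub01.txt", "rp_sub02.txt"]
    mean_FDs, max_FDs = get_mean_frame_displacement_disttribution(rp_files)
    fig = plot_frame_displacement(rp_files[0], mean_FD_distribution=mean_FDs)
    fig.savefig("frame_displacement.pdf")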
| bsd-3-clause |
adiIspas/Machine-Learning_A-Z | Machine Learning A-Z/Part 3 - Classification/Section 14 - Logistic Regression/classification_template.py | 37 | 2538 | # Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Create your classifier here
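# Example only: logistic regression is used as an illustrative placeholder so
# the template runs end-to-end; swap in the classifier of the current section.
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)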
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | mit |
annayqho/TheCannon | code/lamost/xcalib_4labels/xcalib_paper.py | 1 | 5975 | """ Mass production for the xcalib paper.
All you need to change each time is the date you want to run. """
import numpy as np
import pickle
import glob
import os
import sys
from matplotlib import rc
sys.path.insert(0, '/home/annaho/xcalib/TheCannon')
from lamost import load_spectra
from TheCannon import dataset
from TheCannon import model
from astropy.table import Table
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
rc('text', usetex=True)
rc('font', family='serif')
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def run(date):
# Training step has already been completed. Load the model,
spectral_model = model.CannonModel(2) # 2 = quadratic model
spectral_model.coeffs = np.load("./coeffs.npz")['arr_0']
spectral_model.scatters = np.load("./scatter.npz")['arr_0']
spectral_model.chisqs = np.load("./chisqs.npz")['arr_0']
spectral_model.pivots = np.load("./pivots.npz")['arr_0']
# Load the wavelength array
wl = np.load("wl.npz")['arr_0']
# Load the test set,
test_ID = np.loadtxt("test_obj/%s_test_obj.txt" %date, dtype=str)
print("%s test objects" %len(test_ID))
dir_dat = "/home/share/LAMOST/DR2/DR2_release"
test_IDs, wl, test_flux, test_ivar = load_spectra(dir_dat, test_ID)
np.savez("output/%s_ids" %date, test_IDs)
#np.savez("./%s_data_raw" %date, test_flux, test_ivar)
# Load the corresponding LAMOST labels,
labels = np.load("lamost_labels/lamost_labels_%s.npz" %date)['arr_0']
inds = np.array([np.where(labels[:,0]==a)[0][0] for a in test_IDs])
nstars = len(test_IDs)
lamost_labels = np.zeros((nstars,4))
lamost_labels[:,0:3] = labels[inds,:][:,1:].astype(float)
np.savez("output/%s_lamost_label" %date, lamost_labels)
# Set dataset object
data = dataset.Dataset(
wl, test_IDs, test_flux, test_ivar,
lamost_labels, test_IDs, test_flux, test_ivar)
# set the headers for plotting
data.set_label_names(['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]'])
# Plot SNR distribution
data.diagnostics_SNR(figname="%s_SNRdist.png" %date)
np.savez("output/%s_SNR" %date, data.test_SNR)
# Continuum normalize,
filename = "output/%s_norm.npz" %date
if glob.glob(filename):
print("already cont normalized")
data.test_flux = np.load(filename)['arr_0']
data.test_ivar = np.load(filename)['arr_1']
else:
data.tr_ID = data.tr_ID[0]
data.tr_flux = data.tr_flux[0,:]
data.tr_ivar = data.tr_ivar[0,:]
data.continuum_normalize_gaussian_smoothing(L=50)
np.savez("output/%s_norm" %date, data.test_flux, data.test_ivar)
# Infer labels
errs, chisq = spectral_model.infer_labels(data)
np.savez("output/%s_cannon_labels.npz" %date, data.test_label_vals)
np.savez("./%s_formal_errors.npz" %date, errs)
np.savez("./%s_chisq.npz" %date, chisq)
# Make plots
data.test_label_vals = data.test_label_vals[:,0:3] # so it doesn't try alpha
data.set_label_names(['T_{eff}', '\log g', '[M/H]'])
data.diagnostics_1to1(figname="%s_1to1_label" %date)
# Find chi sq of fit
# lvec = _get_lvec(list(data.test_label_vals[jj,:]-model.pivots))
# chi = data.tr_flux[jj,:] - (np.dot(coeffs, lvec) + model.coeffs[:,0])
# chisq_star = sum(chi**2)
# dof = npix - nparam
if __name__ == "__main__":
#date = '20130324'
#print(date)
#run(date)
# dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
# dates = np.array(dates)
# dates = np.delete(dates, np.where(dates=='.directory')[0][0])
# dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
# dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
# dates = np.delete(dates, np.where(dates=='20140330')[0][0]) # no obj
# dates = np.delete(dates, np.where(dates=='20121028')[0][0]) # no obj
# for date in dates[366:]:
# print(date)
# run(date)
# run(date)
# DR1 = dates before sept 2013
# if np.logical_and(int(date[0:4]) < 2014, int(date[4:6]) < 9):
# print("is DR1")
# if len(glob.glob("%s_1to1_label_0.png" %date)) == 0:
# print("isn't done yet, running TC")
# run(date)
a = glob.glob("output/2*cannon_labels.npz")
b = glob.glob("output/2*ids.npz")
c = glob.glob("output/2*SNR.npz")
d = glob.glob("output/2*formal_errors.npz")
e = glob.glob("output/2*chisq.npz")
ids_all = np.array([])
teff_all = np.array([])
logg_all = np.array([])
feh_all = np.array([])
alpha_all = np.array([])
SNRs_all = np.array([])
errs_all = np.array([])
chisq_all = np.array([])
print("cannon labels")
for filename in a:
labels = np.load(filename)['arr_0']
teff_all = np.append(teff_all, labels[:,0])
logg_all = np.append(logg_all, labels[:,1])
feh_all = np.append(feh_all, labels[:,2])
alpha_all = np.append(alpha_all, labels[:,3])
print("IDs")
for filename in b:
ids = np.load(filename)['arr_0']
ids_all = np.append(ids_all, ids)
print("SNRs")
for filename in c:
SNR = np.load(filename)['arr_0']
SNRs_all = np.append(SNRs_all, SNR)
print("Formal Errors")
for filename in d:
errs = np.load(filename)['arr_0']
errs_all = np.append(errs_all, errs)
print("Chi Sq")
for filename in e:
chisq = np.load(filename)['arr_0']
chisq_all = np.append(chisq_all, chisq)
np.savez("DR2/teff_all.npz",teff_all)
np.savez("DR2/logg_all.npz",logg_all)
np.savez("DR2/feh_all.npz",feh_all)
np.savez("DR2/alpha_all.npz", alpha_all)
np.savez("DR2/ids_all.npz", ids_all)
np.savez("DR2/chisq_all.npz", chisq_all)
np.savez("DR2/SNRs_all.npz", SNRs_all)
np.savez("DR2/errs_all.npz", errs_all)
| mit |
mantidproject/mantid | scripts/Muon/GUI/Common/plot_widget/plotting_canvas/plotting_canvas_view.py | 3 | 14372 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from typing import List
from matplotlib.container import ErrorbarContainer
from qtpy import QtWidgets, QtCore
from Muon.GUI.Common.plot_widget.plotting_canvas.plot_toolbar import PlotToolbar
from Muon.GUI.Common.plot_widget.plotting_canvas.plotting_canvas_model import WorkspacePlotInformation
from Muon.GUI.Common.plot_widget.plotting_canvas.plotting_canvas_view_interface import PlottingCanvasViewInterface
from Muon.GUI.Common.plot_widget.plotting_canvas.plot_color_queue import ColorQueue
from mantid import AnalysisDataService
from mantid.plots import legend_set_draggable
from mantid.plots.plotfunctions import get_plot_fig
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.qt_compat import is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import FigureCanvas
else:
from matplotlib.backends.backend_qt4agg import FigureCanvas
# Default color cycle using Matplotlib color codes C0, C1...ect
NUMBER_OF_COLOURS = 10
DEFAULT_COLOR_CYCLE = ["C" + str(index) for index in range(NUMBER_OF_COLOURS)]
def _do_single_plot(ax, workspace, index, errors, plot_kwargs):
plot_fn = ax.errorbar if errors else ax.plot
plot_kwargs['wkspIndex'] = index
plot_fn(workspace, **plot_kwargs)
def get_y_min_max_between_x_range(line, x_min, x_max, y_min, y_max):
x, y = line.get_data()
for i in range(len(x)):
if x_min <= x[i] <= x_max:
y_min = min(y_min, y[i])
y_max = max(y_max, y[i])
return y_min, y_max
class PlottingCanvasView(QtWidgets.QWidget, PlottingCanvasViewInterface):
def __init__(self, quick_edit, min_y_range, y_axis_margin, parent=None):
super().__init__(parent)
# later we will allow these to be changed in the settings
self._min_y_range = min_y_range
self._y_axis_margin = y_axis_margin
# create the figure
self.fig = Figure()
self.fig.canvas = FigureCanvas(self.fig)
self.fig.canvas.setMinimumHeight(500)
self.toolBar = PlotToolbar(self.fig.canvas, self)
# Create a set of Mantid axis for the figure
self.fig, axes = get_plot_fig(overplot=False, ax_properties=None, axes_num=1,
fig=self.fig)
self._number_of_axes = 1
self._color_queue = [ColorQueue(DEFAULT_COLOR_CYCLE)]
# Add a splitter for the plotting canvas and quick edit toolbar
splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
splitter.addWidget(self.fig.canvas)
self._quick_edit = quick_edit
splitter.addWidget(self._quick_edit)
splitter.setChildrenCollapsible(False)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.toolBar)
layout.addWidget(splitter)
self.setLayout(layout)
self._plot_information_list = [] # type : List[PlotInformation}
@property
def autoscale_state(self):
return self._quick_edit.autoscale_state
@property
def plotted_workspace_information(self):
return self._plot_information_list
@property
def plotted_workspaces_and_indices(self):
plotted_workspaces = []
plotted_indices = []
for plot_info in self._plot_information_list:
plotted_workspaces.append(plot_info.workspace_name)
plotted_indices.append(plot_info.index)
return plotted_workspaces, plotted_indices
@property
def num_plotted_workspaces(self):
return len(self._plot_information_list)
@property
def number_of_axes(self):
return self._number_of_axes
def create_new_plot_canvas(self, num_axes):
"""Creates a new blank plotting canvas"""
self.toolBar.reset_gridline_flags()
self._plot_information_list = []
self._number_of_axes = num_axes
self._color_queue = [ColorQueue(DEFAULT_COLOR_CYCLE) for _ in range(num_axes)]
self.fig.clf()
self.fig, axes = get_plot_fig(overplot=False, ax_properties=None, axes_num=num_axes,
fig=self.fig)
self.fig.tight_layout()
self.fig.canvas.draw()
def clear_all_workspaces_from_plot(self):
"""Clears all workspaces from the plot"""
for ax in self.fig.axes:
ax.cla()
ax.tracked_workspaces.clear()
ax.set_prop_cycle(None)
for color_queue in self._color_queue:
color_queue.reset()
self._plot_information_list = []
def add_workspaces_to_plot(self, workspace_plot_info_list: List[WorkspacePlotInformation]):
"""Add a list of workspaces to the plot - The workspaces are contained in a list PlotInformation
The PlotInformation contains the workspace name, workspace index and target axis."""
for workspace_plot_info in workspace_plot_info_list:
workspace_name = workspace_plot_info.workspace_name
try:
workspace = AnalysisDataService.Instance().retrieve(workspace_name)
except (RuntimeError, KeyError):
continue
self._plot_information_list.append(workspace_plot_info)
errors = workspace_plot_info.errors
ws_index = workspace_plot_info.index
axis_number = workspace_plot_info.axis
ax = self.fig.axes[axis_number]
plot_kwargs = self._get_plot_kwargs(workspace_plot_info)
plot_kwargs['color'] = self._color_queue[axis_number]()
_do_single_plot(ax, workspace, ws_index, errors=errors,
plot_kwargs=plot_kwargs)
def remove_workspace_info_from_plot(self, workspace_plot_info_list: List[WorkspacePlotInformation]):
# We reverse the workspace info list so that we can maintain a unique color queue
# See _update_color_queue_on_workspace_removal for more
workspace_plot_info_list.reverse()
for workspace_plot_info in workspace_plot_info_list:
workspace_name = workspace_plot_info.workspace_name
if not AnalysisDataService.Instance().doesExist(workspace_name):
continue
workspace = AnalysisDataService.Instance().retrieve(workspace_name)
for plotted_information in self._plot_information_list.copy():
if workspace_plot_info.workspace_name == plotted_information.workspace_name and \
workspace_plot_info.axis == plotted_information.axis:
self._update_color_queue_on_workspace_removal(workspace_plot_info.axis, workspace_name)
axis = self.fig.axes[workspace_plot_info.axis]
axis.remove_workspace_artists(workspace)
self._plot_information_list.remove(plotted_information)
# If we have no plotted lines, reset the color cycle
if self.num_plotted_workspaces == 0:
self._reset_color_cycle()
def remove_workspace_from_plot(self, workspace):
"""Remove all references to a workspaces from the plot """
for workspace_plot_info in self._plot_information_list.copy():
workspace_name = workspace_plot_info.workspace_name
if workspace_name == workspace.name():
self._update_color_queue_on_workspace_removal(workspace_plot_info.axis, workspace_name)
axis = self.fig.axes[workspace_plot_info.axis]
axis.remove_workspace_artists(workspace)
self._plot_information_list.remove(workspace_plot_info)
def _update_color_queue_on_workspace_removal(self, axis_number, workspace_name):
try:
artist_info = self.fig.axes[axis_number].tracked_workspaces[workspace_name]
except KeyError:
return
for ws_artist in artist_info:
for artist in ws_artist._artists:
if isinstance(artist, ErrorbarContainer):
color = artist[0].get_color()
else:
color = artist.get_color()
# When we repeat colors we don't want to add colors to the queue if they are already plotted.
# We know we are repeating colors if we have more lines than colors, then we check if the color
# removed is already the color of an existing line. If it is we don't manually re-add the color
# to the queue. This ensures we only plot lines of the same colour if we have more lines
# plotted than colours
lines = self.fig.axes[axis_number].get_lines()
if len(lines) > NUMBER_OF_COLOURS:
current_colors = [line.get_c() for line in lines]
if color in current_colors:
return
self._color_queue[axis_number] += color
# Ads observer functions
def replace_specified_workspace_in_plot(self, workspace):
"""Replace specified workspace in the plot with a new and presumably updated instance"""
for workspace_plot_info in self._plot_information_list:
plotted_workspace_name = workspace_plot_info.workspace_name
workspace_name = workspace.name()
if workspace_name == plotted_workspace_name:
axis = self.fig.axes[workspace_plot_info.axis]
axis.replace_workspace_artists(workspace)
self.redraw_figure()
def replot_workspace_with_error_state(self, workspace_name, with_errors: bool):
for plot_info in self.plotted_workspace_information:
if plot_info.workspace_name == workspace_name:
axis = self.fig.axes[plot_info.axis]
workspace_name = plot_info.workspace_name
artist_info = axis.tracked_workspaces[workspace_name]
for ws_artist in artist_info:
for artist in ws_artist._artists:
if isinstance(artist, ErrorbarContainer):
color = artist[0].get_color()
else:
color = artist.get_color()
plot_kwargs = self._get_plot_kwargs(plot_info)
plot_kwargs["color"] = color
axis.replot_artist(artist, with_errors, **plot_kwargs)
self.redraw_figure()
def set_axis_xlimits(self, axis_number, xlims):
ax = self.fig.axes[axis_number]
ax.set_xlim(xlims[0], xlims[1])
def set_axis_ylimits(self, axis_number, ylims):
ax = self.fig.axes[axis_number]
ax.set_ylim(ylims[0], ylims[1])
def set_axes_limits(self, xlim, ylim):
plt.setp(self.fig.axes, xlim=xlim, ylim=ylim)
def autoscale_y_axes(self):
ymin = 1e9
ymax = -1e9
for axis in self.fig.axes:
ymin_i, ymax_i = self._get_y_axis_autoscale_limits(axis)
if ymin_i < ymin:
ymin = ymin_i
if ymax_i > ymax:
ymax = ymax_i
plt.setp(self.fig.axes, ylim=[ymin, ymax])
@property
def get_xlim_list(self):
        xlim_list = []
        for axis in self.fig.axes:
            x_min, x_max = axis.get_xlim()
            xlim_list.append([x_min, x_max])
        return xlim_list
@property
def get_ylim_list(self):
        ylim_list = []
        for axis in self.fig.axes:
            y_min, y_max = axis.get_ylim()
            ylim_list.append([y_min, y_max])
        return ylim_list
def autoscale_selected_y_axis(self, axis_number):
if axis_number >= len(self.fig.axes):
return
axis = self.fig.axes[axis_number]
bottom, top, = self._get_y_axis_autoscale_limits(axis)
axis.set_ylim(bottom, top)
def set_title(self, axis_number, title):
if axis_number >= self.number_of_axes:
return
axis = self.fig.axes[axis_number]
axis.set_title(title)
def get_axis_limits(self, axis_number):
xmin, xmax = self.fig.axes[axis_number].get_xlim()
ymin, ymax = self.fig.axes[axis_number].get_ylim()
return xmin, xmax, ymin, ymax
def redraw_figure(self):
self.fig.canvas.toolbar.update()
self._redraw_legend()
self.fig.tight_layout()
self.fig.canvas.draw()
def _redraw_legend(self):
for ax in self.fig.axes:
if ax.get_legend_handles_labels()[0]:
legend = ax.legend(prop=dict(size=5))
legend_set_draggable(legend, True)
def _get_plot_kwargs(self, workspace_info: WorkspacePlotInformation):
label = workspace_info.label
plot_kwargs = {'distribution': True, 'autoscale_on_update': False, 'label': label}
return plot_kwargs
def _get_y_axis_autoscale_limits(self, axis):
x_min, x_max = sorted(axis.get_xlim())
y_min, y_max = np.inf, -np.inf
for line in axis.lines:
y_min, y_max = get_y_min_max_between_x_range(line, x_min, x_max, y_min, y_max)
if y_min == np.inf:
y_min = -self._min_y_range
if y_max == -np.inf:
y_max = self._min_y_range
if y_min == y_max:
y_min -= self._min_y_range
y_max += self._min_y_range
y_margin = abs(y_max - y_min) * self._y_axis_margin
return y_min - y_margin, y_max + y_margin
def _reset_color_cycle(self):
for i, ax in enumerate(self.fig.axes):
ax.cla()
ax.tracked_workspaces.clear()
def resizeEvent(self, event):
self.fig.tight_layout()
def add_uncheck_autoscale_subscriber(self, observer):
self.toolBar.uncheck_autoscale_notifier.add_subscriber(observer)
def add_enable_autoscale_subscriber(self, observer):
self.toolBar.enable_autoscale_notifier.add_subscriber(observer)
def add_disable_autoscale_subscriber(self, observer):
self.toolBar.uncheck_autoscale_notifier.add_subscriber(observer)
def add_range_changed_subscriber(self, observer):
self.toolBar.range_changed_notifier.add_subscriber(observer)
| gpl-3.0 |
gibiansky/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 13 | 4303 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters
for the DBpedia dataset to predict the class from the description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
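  # One-hot encode each byte so a document becomes a tensor of shape
  # [batch, MAX_DOCUMENT_LENGTH, 256, 1], i.e. a one-channel "image" that the
  # 2D convolutions below can scan along the character axis.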
byte_list = tf.reshape(tf.one_hot(features, 256, 1, 0),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
jkarnows/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
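    Examples
    --------
    An illustrative sketch (not part of the original docstring):
    >>> from sklearn.metrics.cluster import contingency_matrix
    >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
    array([[1, 1],
           [0, 2]])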
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when the label
    values are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
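    Examples
    --------
    An illustrative sketch (not part of the original docstring):
    >>> from sklearn.metrics.cluster import homogeneity_completeness_v_measure
    >>> homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 1])
    (1.0, 1.0, 1.0)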
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but un-necessary splits harms completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from entirely different classes completely
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
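# A minimal usage sketch for mutual_info_score (illustrative; assumes a working
# scikit-learn install): two identical partitions of four points into two equal
# clusters share log(2) ~= 0.693 nats of information.
#
#     >>> from sklearn.metrics.cluster import mutual_info_score
#     >>> round(mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]), 3)
#     0.693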
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
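# Small sanity check for entropy() above (hedged example; value worked out by
# hand): a balanced labeling of four samples into two classes has entropy
# log(2) ~= 0.693, so entropy([0, 0, 1, 1]) returns approximately 0.6931.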
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e356.py | 2 | 5854 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.8,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
5000: 1e-5,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512 * 8
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': N * 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
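# Rough reading of the stack that exp_a() assembles (sizes assume N = 512 * 8):
# input -> Dense(8192) -> Dense(4096) -> Dense(2048) -> Dense(1024)
# -> Dense(output_shape[1] * output_shape[2], sigmoid),
# trained with the mean binary cross-entropy loss configured in net_dict above.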
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
MaxHalford/xam | xam/preprocessing/groupby_transformer.py | 1 | 2805 | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.base import TransformerMixin
class GroupbyTransformer(BaseEstimator, TransformerMixin):
def __init__(self, base_transformer, by):
self.base_transformer = base_transformer
self.by = by
def _get_transform_columns(self, X):
return [col for col in X.columns if col != self.by]
def fit(self, X, y=None, **fit_params):
if not isinstance(X, pd.DataFrame):
raise ValueError('X is not a pandas.DataFrame')
self.transformers_ = {}
if y is None:
y = np.zeros(shape=len(X))
columns = self._get_transform_columns(X)
for key in X[self.by].unique():
# Copy the transformer
transformer = clone(self.base_transformer)
# Select the rows that will be fitted
mask = (X[self.by] == key).tolist()
rows = X.index[mask]
# Fit the transformer
transformer.fit(X.loc[rows, columns], y[mask], **fit_params)
# Save the transformer
self.transformers_[key] = transformer
return self
def transform(self, X):
if not isinstance(X, pd.DataFrame):
raise ValueError('X is not a pandas.DataFrame')
columns = self._get_transform_columns(X)
for key in X[self.by].unique():
# Check if a transformer is associated with the key
transformer = self.transformers_.get(key)
if transformer is None:
raise ValueError('No transformer is associated with key {}'.format(key))
# Select the rows to transform
mask = (X[self.by] == key).tolist()
rows = X.index[mask]
# Transform the rows
X.loc[rows, columns] = transformer.transform(X.loc[rows, columns])
return X
def fit_transform(self, X, y=None, **fit_params):
if not isinstance(X, pd.DataFrame):
raise ValueError('X is not a pandas.DataFrame')
self.transformers_ = {}
if y is None:
y = np.zeros(shape=len(X))
columns = self._get_transform_columns(X)
for key in X[self.by].unique():
# Copy the transformer
transformer = clone(self.base_transformer)
# Select the rows that will be fitted
mask = (X[self.by] == key).tolist()
rows = X.index[mask]
# Fit and transform
X.loc[rows, columns] = transformer.fit_transform(
X.loc[rows, columns],
y[mask],
**fit_params
)
# Save the transformer
self.transformers_[key] = transformer
return X
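# Minimal usage sketch (illustrative; assumes pandas and scikit-learn are
# installed): scale the 'x' column separately within each 'group' value.
#
#     >>> import pandas as pd
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'],
#     ...                    'x': [1.0, 2.0, 10.0, 20.0]})
#     >>> scaled = GroupbyTransformer(StandardScaler(), by='group').fit_transform(df)
#     >>> list(scaled['x'])
#     [-1.0, 1.0, -1.0, 1.0]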
| mit |
Erotemic/plottool | broken/_grave.py | 1 | 2857 |
def test_integral_label_colormap():
"""
UNFINISHED
Above 0 use a inverted hot scale and less than that use special colors
References:
http://stackoverflow.com/questions/18704353/correcting-matplotlib-colorbar-ticks
http://stackoverflow.com/questions/15908371/matplotlib-colorbars-and-its-text-labels
http://stackoverflow.com/questions/14777066/matplotlib-discrete-colorbar
Example:
>>> from plottool_ibeis.draw_func2 import * # NOQA
"""
def label_domain(unique_scalars):
diff = np.diff(unique_scalars)
# Find the holes in unique_scalars
missing_vals = []
for diffx in np.where(diff > 1)[0]:
missing_vals.extend([(unique_scalars[diffx] + x + 1) for x in range(diff[diffx] - 1)])
# Find the indices of those holes
missing_ixs = np.array(missing_vals) - min_
assert all([val not in unique_scalars for val in missing_vals])
domain = np.array([x for ix, x in enumerate(unique_scalars) if ix not in missing_ixs])
domain -= min_
return domain
from plottool_ibeis import df2
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import utool
fig, ax = plt.subplots()
np.random.seed(0)
data = (np.random.random((10, 10)) * 13).astype(np.int32) - 2
data[data == 0] = 12
unique_scalars = np.array(sorted(np.unique(data)))
max_ = unique_scalars.max()
min_ = unique_scalars.min()
range_ = max_ - min_
bounds = np.linspace(min_, max_ + 1, range_ + 2)
base_colormap = df2.reverse_colormap(plt.get_cmap('hot'))
# Get a few more colors than we actually need so we don't hit the bottom of
# the cmap
colors_ix = np.concatenate((np.linspace(0, 1., range_ + 2), (0., 0., 0., 0.)))
colors_rgba = base_colormap(colors_ix)
val2_special_rgba = {
-1: df2.UNKNOWN_PURP,
-2: df2.LIGHT_BLUE,
}
def get_new_color(ix, val):
if val in val2_special_rgba:
return val2_special_rgba[val]
else:
return colors_rgba[ix - len(val2_special_rgba) + 1]
special_colors = [get_new_color(ix, val) for ix, val in enumerate(bounds)]
cmap = mpl.colors.ListedColormap(special_colors)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
ax.imshow(data, interpolation='nearest', cmap=cmap, norm=norm)
sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
sm.set_clim(-.5, range_ + 0.5)
colorbar = plt.colorbar(sm)
missing_ixs = utool.find_nonconsec_indices(unique_scalars, bounds)
sel_bounds = np.array([x for ix, x in enumerate(bounds) if ix not in missing_ixs])
ticks = sel_bounds + .5
ticklabels = sel_bounds
colorbar.set_ticks(ticks) # tick locations
colorbar.set_ticklabels(ticklabels) # tick labels
| apache-2.0 |
balazssimon/ml-playground | udemy/lazyprogrammer/logistic-regression-python/bad_xor.py | 1 | 1565 | # logisitc regression classifier for the XOR problem.
#
# the notes for this class can be found at:
# https://deeplearningcourses.com/c/data-science-logistic-regression-in-python
# https://www.udemy.com/data-science-logistic-regression-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
N = 4
D = 2
# XOR
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
])
T = np.array([0, 1, 1, 0])
# add a column of ones
ones = np.ones((N, 1))
# note: no xy = x*y interaction column is added here (this is the "bad" XOR
# setup), so the model below cannot represent XOR
Xb = np.concatenate((ones, X), axis=1)
# randomly initialize the weights
w = np.random.randn(D + 1)
# calculate the model output
z = Xb.dot(w)
def sigmoid(z):
return 1/(1 + np.exp(-z))
Y = sigmoid(z)
# calculate the cross-entropy error
def cross_entropy(T, Y):
return -(T*np.log(Y) + (1-T)*np.log(1-Y)).sum()
# let's do gradient descent 100 times
learning_rate = 0.001
error = []
w_mags = []
for i in range(100000):
e = cross_entropy(T, Y)
error.append(e)
if i % 1000 == 0:
print(e)
# gradient descent weight update (no regularization term here)
w += learning_rate * Xb.T.dot(T - Y)
w_mags.append(w.dot(w))
# recalculate Y
Y = sigmoid(Xb.dot(w))
plt.plot(error)
plt.title("Cross-entropy per iteration")
plt.show()
plt.plot(w_mags)
plt.title("w^2 magnitudes")
plt.show()
print("Final w:", w)
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)
| apache-2.0 |
ilo10/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
Erotemic/ubelt | ubelt/util_format.py | 1 | 40528 | # -*- coding: utf-8 -*-
"""
Defines the function :func:`repr2`, which allows for a bit more customization
than :func:`repr` or :func:`pprint`. See the docstring for more details.
Two main goals of repr2 are to provide nice string representations of nested
data structures and make those "eval-able" whenever possible. As an example
take the value `float('inf')`, which normally has a non-evalable repr of `inf`:
>>> import ubelt as ub
>>> ub.repr2(float('inf'))
"float('inf')"
The `newline` (or `nl`) keyword argument can control how deep in the nesting
newlines are allowed.
>>> print(ub.repr2({1: float('nan'), 2: float('inf'), 3: 3.0}))
{
1: float('nan'),
2: float('inf'),
3: 3.0,
}
>>> print(ub.repr2({1: float('nan'), 2: float('inf'), 3: 3.0}, nl=0))
{1: float('nan'), 2: float('inf'), 3: 3.0}
You can also define or overwrite how representations for different types are
created. You can either create your own extension object, or you can
monkey-patch `ub.util_format._FORMATTER_EXTENSIONS` without specifying the
extensions keyword argument (although this will be a global change).
>>> extensions = ub.util_format.FormatterExtensions()
>>> @extensions.register(float)
>>> def my_float_formater(data, **kw):
>>> return "monkey({})".format(data)
>>> print(ub.repr2({1: float('nan'), 2: float('inf'), 3: 3.0}, nl=0, extensions=extensions))
{1: monkey(nan), 2: monkey(inf), 3: monkey(3.0)}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import sys
# from typing import List, Callable, Type, Dict
PY2 = sys.version_info[0] == 2
if PY2:
import six
iteritems = six.iteritems
string_types = six.string_types
text_type = six.text_type
else:
string_types = (str,)
text_type = str
def iteritems(d, **kw):
return d.items(**kw)
def repr2(data, **kwargs):
"""
Makes a pretty string representation of ``data``.
Makes a pretty and easy-to-doctest string representation. Has nice handling
of common nested datatypes. This is an alternative to repr, and
:func:`pprint.pformat`.
The output of this function is very configurable. By default it aims to
produce strings that are executable and consistent between Python versions.
This makes them great for doctests.
Notes:
This function has many keyword arguments that can be used to customize
the final representation. For convinience some of the more frequently
used kwargs have short aliases. See "Kwargs" for more details.
Args:
data (object): an arbitrary python object
**kwargs: see "the Kwargs" section
Kwargs:
si, stritems, (bool):
dict/list items use str instead of repr
strkeys, sk (bool):
dict keys use str instead of repr
strvals, sv (bool):
dict values use str instead of repr
nl, newlines (int | bool):
number of top level nestings to place a newline after. If true all
items are followed by newlines regardless of nesting level.
Defaults to 1 for lists and True for dicts.
nobr, nobraces (bool, default=False):
if True, text will not contain outer braces for containers
cbr, compact_brace (bool, default=False):
if True, braces are compactified (i.e. they will not have newlines
placed directly after them, think java / K&R / 1TBS)
trailsep, trailing_sep (bool):
if True, a separator is placed after the last item in a sequence.
By default this is True if there are any ``nl > 0``.
explicit (bool, default=False):
changes dict representation from ``{k1: v1, ...}`` to
``dict(k1=v1, ...)``.
precision (int, default=None):
if specified floats are formatted with this precision
kvsep (str, default=': '):
separator between keys and values
itemsep (str, default=' '):
separator between items
sort (bool | callable, default=None):
if None, then sort unordered collections, but keep the ordering of
ordered collections. This option attempts to be deterministic in
most cases.
New in 0.8.0: if ``sort`` is callable, it will be used as a
key-function to sort all collections.
if False, then nothing will be sorted, and the representation of
unordered collections will be arbitrary and possibly
non-deterministic.
if True, attempts to sort all collections in the returned text.
Currently if True this WILL sort lists.
Currently if True this WILL NOT sort OrderedDicts.
NOTE:
The previous behavior may not be intuitive, as such the
behavior of this arg is subject to change.
suppress_small (bool):
passed to :func:`numpy.array2string` for ndarrays
max_line_width (int):
passed to :func:`numpy.array2string` for ndarrays
with_dtype (bool):
only relevant to numpy.ndarrays. if True includes the dtype.
Defaults to `not strvals`.
align (bool | str, default=False):
if True, will align multi-line dictionaries by the kvsep
extensions (FormatterExtensions):
a custom :class:`FormatterExtensions` instance that can overwrite or
define how different types of objects are formatted.
Returns:
str: outstr - output string
Notes:
There are also internal kwargs, which should not be used:
_return_info (bool): return information about child context
_root_info (depth): information about parent context
Example:
>>> from ubelt.util_format import *
>>> import ubelt as ub
>>> dict_ = {
... 'custom_types': [slice(0, 1, None), 1/3],
... 'nest_dict': {'k1': [1, 2, {3: {4, 5}}],
... 'key2': [1, 2, {3: {4, 5}}],
... 'key3': [1, 2, {3: {4, 5}}],
... },
... 'nest_dict2': {'k': [1, 2, {3: {4, 5}}]},
... 'nested_tuples': [tuple([1]), tuple([2, 3]), frozenset([4, 5, 6])],
... 'one_tup': tuple([1]),
... 'simple_dict': {'spam': 'eggs', 'ham': 'jam'},
... 'simple_list': [1, 2, 'red', 'blue'],
... 'odict': ub.odict([(1, '1'), (2, '2')]),
... }
>>> # In the interest of saving space we are only going to show the
>>> # output for the first example.
>>> result = repr2(dict_, nl=1, precision=2)
>>> print(result)
{
'custom_types': [slice(0, 1, None), 0.33],
'nest_dict': {'k1': [1, 2, {3: {4, 5}}], 'key2': [1, 2, {3: {4, 5}}], 'key3': [1, 2, {3: {4, 5}}]},
'nest_dict2': {'k': [1, 2, {3: {4, 5}}]},
'nested_tuples': [(1,), (2, 3), {4, 5, 6}],
'odict': {1: '1', 2: '2'},
'one_tup': (1,),
'simple_dict': {'ham': 'jam', 'spam': 'eggs'},
'simple_list': [1, 2, 'red', 'blue'],
}
>>> # You can try the rest yourself.
>>> result = repr2(dict_, nl=3, precision=2); print(result)
>>> result = repr2(dict_, nl=2, precision=2); print(result)
>>> result = repr2(dict_, nl=1, precision=2, itemsep='', explicit=True); print(result)
>>> result = repr2(dict_, nl=1, precision=2, nobr=1, itemsep='', explicit=True); print(result)
>>> result = repr2(dict_, nl=3, precision=2, cbr=True); print(result)
>>> result = repr2(dict_, nl=3, precision=2, si=True); print(result)
>>> result = repr2(dict_, nl=3, sort=True); print(result)
>>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False); print(result)
>>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False, nobr=True); print(result)
Example:
>>> from ubelt.util_format import *
>>> def _nest(d, w):
... if d == 0:
... return {}
... else:
... return {'n{}'.format(d): _nest(d - 1, w + 1), 'm{}'.format(d): _nest(d - 1, w + 1)}
>>> dict_ = _nest(d=4, w=1)
>>> result = repr2(dict_, nl=6, precision=2, cbr=1)
>>> print('---')
>>> print(result)
>>> result = repr2(dict_, nl=-1, precision=2)
>>> print('---')
>>> print(result)
"""
custom_extensions = kwargs.get('extensions', None)
_return_info = kwargs.get('_return_info', False)
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
outstr = None
_leaf_info = None
if custom_extensions:
func = custom_extensions.lookup(data)
if func is not None:
outstr = func(data, **kwargs)
if outstr is None:
if isinstance(data, dict):
outstr, _leaf_info = _format_dict(data, **kwargs)
elif isinstance(data, (list, tuple, set, frozenset)):
outstr, _leaf_info = _format_list(data, **kwargs)
if outstr is None:
# check any globally registered functions for special formatters
func = _FORMATTER_EXTENSIONS.lookup(data)
if func is not None:
outstr = func(data, **kwargs)
else:
outstr = _format_object(data, **kwargs)
if _return_info:
_leaf_info = _rectify_leaf_info(_leaf_info)
return outstr, _leaf_info
else:
return outstr
def _rectify_root_info(_root_info):
if _root_info is None:
_root_info = {
'depth': 0,
}
return _root_info
def _rectify_leaf_info(_leaf_info):
if _leaf_info is None:
_leaf_info = {
'max_height': 0,
'min_height': 0,
}
return _leaf_info
class FormatterExtensions(object):
"""
Helper class for managing non-builtin (e.g. numpy) format types.
This module (:mod:`ubelt.util_format`) maintains a global set of basic
extensions, but it is also possible to create a locally scoped set of
extensions and explicitly pass it to repr2. The following example
demonstrates this.
Example:
>>> import ubelt as ub
>>> class MyObject(object):
>>> pass
>>> data = {'a': [1, 2.2222, MyObject()], 'b': MyObject()}
>>> # Create a custom set of extensions
>>> extensions = ub.FormatterExtensions()
>>> # Register a function to format your specific type
>>> @extensions.register(MyObject)
>>> def format_myobject(data, **kwargs):
>>> return 'I can do anything here'
>>> # Repr2 will now respect the passed custom extensions
>>> # Note that the global extensions will still be respected
>>> # unless they are overloaded.
>>> print(ub.repr2(data, nl=-1, precision=1, extensions=extensions))
{
'a': [1, 2.2, I can do anything here],
'b': I can do anything here
}
>>> # Overload the formatter for float and int
>>> @extensions.register((float, int))
>>> def format_myobject(data, **kwargs):
>>> return str((data + 10) // 2)
>>> print(ub.repr2(data, nl=-1, precision=1, extensions=extensions))
{
'a': [5, 6.0, I can do anything here],
'b': I can do anything here
}
"""
# set_types = [set, frozenset]
# list_types = [list, tuple]
# dict_types = [dict]
# custom_types = {
# 'numpy': [],
# 'pandas': [],
# }
# @classmethod
# def sequence_types(cls):
# return cls.list_types + cls.set_types
def __init__(self):
self._type_registry = {} # type: Dict[Type, Callable]
self._typename_registry = {} # type: Dict[str, Callable]
self._lazy_queue = [] # type: List[Callable]
# self._lazy_registrations = [
# self._register_numpy_extensions,
# self._register_builtin_extensions,
# ]
def register(self, key):
"""
Registers a custom formatting function with ub.repr2
Args:
key (Type | Tuple[Type] | str): indicator of the type
Returns:
Callable: decorator function
"""
def _decorator(func):
if isinstance(key, tuple):
for t in key:
self._type_registry[t] = func
if isinstance(key, string_types):
self._typename_registry[key] = func
else:
self._type_registry[key] = func
return func
return _decorator
def lookup(self, data):
"""
Returns an appropriate function to format ``data`` if one has been
registered.
"""
# Evaluate the lazy queue if anything is in it
if self._lazy_queue:
for func in self._lazy_queue:
func()
self._lazy_queue = []
for type_, func in self._type_registry.items():
if isinstance(data, type_):
return func
# Fallback to registered typenames.
# If we cannot find a formatter for this type, then return None
typename = type(data).__name__
func = self._typename_registry.get(typename, None)
return func
def _register_pandas_extensions(self):
"""
Example:
>>> # xdoctest: +REQUIRES(module:pandas)
>>> # xdoctest: +IGNORE_WHITESPACE
>>> import pandas as pd
>>> import numpy as np
>>> import ubelt as ub
>>> rng = np.random.RandomState(0)
>>> data = pd.DataFrame(rng.rand(3, 3))
>>> print(ub.repr2(data))
>>> print(ub.repr2(data, precision=2))
>>> print(ub.repr2({'akeyfdfj': data}, precision=2))
"""
@self.register('DataFrame')
def format_pandas(data, **kwargs): # nocover
precision = kwargs.get('precision', None)
float_format = (None if precision is None
else '%.{}f'.format(precision))
formatted = data.to_string(float_format=float_format)
return formatted
# def _register_torch_extensions(self):
# @self.register('Tensor')
# def format_tensor(data, **kwargs):
# """
# Example:
# >>> # xdoctest: +REQUIRES(module:torch)
# >>> # xdoctest: +IGNORE_WHITESPACE
# >>> import torch
# >>> import numpy as np
# >>> data = np.array([[.2, 42, 5], [21.2, 3, .4]])
# >>> data = torch.from_numpy(data)
# >>> data = torch.rand(100, 100)
# >>> print('data = {}'.format(ub.repr2(data, nl=1)))
# >>> print(ub.repr2(data))
# """
# import numpy as np
# func = self._type_registry[np.ndarray]
# npdata = data.data.cpu().numpy()
# # kwargs['strvals'] = True
# kwargs['with_dtype'] = False
# formatted = func(npdata, **kwargs)
# # hack for prefix class
# formatted = formatted.replace('np.array', '__Tensor')
# # import ubelt as ub
# # formatted = ub.hzcat('Tensor(' + formatted + ')')
# return formatted
def _register_numpy_extensions(self):
"""
Example:
>>> # xdoctest: +REQUIRES(module:numpy)
>>> import sys
>>> import pytest
>>> import ubelt as ub
>>> if not ub.modname_to_modpath('numpy'):
... raise pytest.skip()
>>> # xdoctest: +IGNORE_WHITESPACE
>>> import numpy as np
>>> data = np.array([[.2, 42, 5], [21.2, 3, .4]])
>>> print(ub.repr2(data))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]], dtype=np.float64)
>>> print(ub.repr2(data, with_dtype=False))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]])
>>> print(ub.repr2(data, strvals=True))
[[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]]
>>> data = np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=True))
[]
>>> data = np.ma.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.ma.empty((0, 10), dtype=np.float64)
"""
# TODO: should we register numpy using the new string method?
import numpy as np
@self.register(np.ndarray)
def format_ndarray(data, **kwargs):
import re
strvals = kwargs.get('sv', kwargs.get('strvals', False))
itemsep = kwargs.get('itemsep', ' ')
precision = kwargs.get('precision', None)
suppress_small = kwargs.get('suppress_small', None)
max_line_width = kwargs.get('max_line_width', None)
with_dtype = kwargs.get('with_dtype', kwargs.get('dtype', not strvals))
newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
# if with_dtype and strvals:
# raise ValueError('cannot format with strvals and dtype')
separator = ',' + itemsep
if strvals:
prefix = ''
suffix = ''
else:
modname = type(data).__module__
# substitute shorthand for numpy module names
np_nice = 'np'
modname = re.sub('\\bnumpy\\b', np_nice, modname)
modname = re.sub('\\bma.core\\b', 'ma', modname)
class_name = type(data).__name__
if class_name == 'ndarray':
class_name = 'array'
prefix = modname + '.' + class_name + '('
if with_dtype:
dtype_repr = data.dtype.name
# dtype_repr = np.core.arrayprint.dtype_short_repr(data.dtype)
suffix = ',{}dtype={}.{})'.format(itemsep, np_nice, dtype_repr)
else:
suffix = ')'
if not strvals and data.size == 0 and data.shape != (0,):
# Special case for displaying empty data
prefix = modname + '.empty('
body = repr(tuple(map(int, data.shape)))
else:
body = np.array2string(data, precision=precision,
separator=separator,
suppress_small=suppress_small,
prefix=prefix,
max_line_width=max_line_width)
if not strvals:
# Handle special float values inf / nan
body = re.sub('\\binf\\b', np_nice + '.inf', body)
body = re.sub('\\bnan\\b', np_nice + '.nan', body)
if not newlines:
# remove newlines if we need to
body = re.sub('\n *', '', body)
formatted = prefix + body + suffix
return formatted
# Hack, make sure we also register numpy floats
self.register(np.float32)(self._type_registry[float])
def _register_builtin_extensions(self):
@self.register(float)
def format_float(data, **kwargs):
precision = kwargs.get('precision', None)
strvals = kwargs.get('sv', kwargs.get('strvals', False))
if precision is None:
text = text_type(data)
else:
text = ('{:.%df}' % precision).format(data)
if not strvals:
# Ensure the representation of inf and nan is evaluatable
# NOTE: sometimes this function is used to make json objects
# how can we ensure that this doesn't break things?
# Turns out json, never handled these cases. In the future we
# may want to add a json flag to repr2 to encourage it to
# output json-like representations.
# json.loads("[0, 1, 2, nan]")
# json.loads("[Infinity, NaN]")
# json.dumps([float('inf'), float('nan')])
import math
if math.isinf(data) or math.isnan(data):
text = "float('{}')".format(text)
return text
@self.register(slice)
def format_slice(data, **kwargs):
if kwargs.get('itemsep', ' ') == '':
return 'slice(%r,%r,%r)' % (data.start, data.stop, data.step)
else:
return _format_object(data, **kwargs)
_FORMATTER_EXTENSIONS = FormatterExtensions()
_FORMATTER_EXTENSIONS._register_builtin_extensions()
def _lazy_init():
"""
Only called in the case where we encounter an unknown type that a commonly
used external library might have. For now this is just numpy. Numpy is
ubiquitous.
"""
try:
# TODO: can we use lazy loading to prevent trying to import numpy until
# some attribute of _FORMATTER_EXTENSIONS is used?
_FORMATTER_EXTENSIONS._register_numpy_extensions()
_FORMATTER_EXTENSIONS._register_pandas_extensions()
# _FORMATTER_EXTENSIONS._register_torch_extensions()
except ImportError: # nocover
pass
_FORMATTER_EXTENSIONS._lazy_queue.append(_lazy_init)
def _format_object(val, **kwargs):
stritems = kwargs.get('si', kwargs.get('stritems', False))
strvals = stritems or kwargs.get('sv', kwargs.get('strvals', False))
base_valfunc = text_type if strvals else repr
itemstr = base_valfunc(val)
# Remove unicode repr from python2 to agree with python3 output
if PY2 and isinstance(val, string_types): # nocover
if itemstr.startswith(("u'", 'u"')):
itemstr = itemstr[1:]
return itemstr
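# Quick sketch of the helper above (illustrative): by default values go through
# repr(), so _format_object('cat') returns "'cat'", while strvals=True (or
# stritems=True) switches to str() and yields cat without quotes.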
def _format_list(list_, **kwargs):
"""
Makes a pretty printable / human-readable string representation of a
sequence. In most cases this string could be evaled.
Args:
list_ (list): input list
**kwargs: nl, newlines, packed, nobr, nobraces, itemsep, trailing_sep,
strvals indent_, precision, use_numpy, with_dtype, force_dtype,
stritems, strkeys, explicit, sort, key_order, maxlen
Returns:
Tuple[str, Dict] : retstr, _leaf_info
Example:
>>> print(_format_list([])[0])
[]
>>> print(_format_list([], nobr=True)[0])
[]
>>> print(_format_list([1], nl=0)[0])
[1]
>>> print(_format_list([1], nobr=True)[0])
1,
"""
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
kwargs['_root_info']['depth'] += 1
newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
kwargs['nl'] = _rectify_countdown_or_bool(newlines)
nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
itemsep = kwargs.get('itemsep', ' ')
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
# kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
itemstrs, _leaf_info = _list_itemstrs(list_, **kwargs)
if len(itemstrs) == 0:
nobraces = False # force braces to prevent empty output
is_tuple = isinstance(list_, tuple)
is_set = isinstance(list_, (set, frozenset,))
if nobraces:
lbr, rbr = '', ''
elif is_tuple:
lbr, rbr = '(', ')'
elif is_set:
lbr, rbr = '{', '}'
else:
lbr, rbr = '[', ']'
# Doesn't actually put in trailing comma if on same line
trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0 and len(itemstrs)))
# The trailing separator is always needed for single item tuples
if is_tuple and len(list_) <= 1:
trailing_sep = True
if len(itemstrs) == 0:
newlines = False
retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr)
return retstr, _leaf_info
def _format_dict(dict_, **kwargs):
"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict): a dictionary
**kwargs: si, stritems, strkeys, strvals, sk, sv, nl, newlines, nobr,
nobraces, cbr, compact_brace, trailing_sep,
explicit, itemsep, precision, kvsep, sort
Kwargs:
sort (None, default=None):
if True, sorts ALL collections and subcollections,
note, collections with undefined orders (e.g. dicts, sets) are
sorted by default.
nl (int, default=None):
preferred alias for newline. can be a countdown variable
explicit (int, default=False):
can be a countdown variable.
if True, uses dict(a=b) syntax instead of {'a': b}
nobr (bool, default=False): removes outer braces
Returns:
Tuple[str, Dict] : retstr, _leaf_info
Example:
>>> dict_ = {'a': 'edf', 'bc': 'ghi'}
>>> print(_format_dict(dict_)[0])
{
'a': 'edf',
'bc': 'ghi',
}
>>> print(_format_dict(dict_, align=True)[0])
>>> print(_format_dict(dict_, align=':')[0])
{
'a' : 'edf',
'bc': 'ghi',
}
>>> print(_format_dict(dict_, explicit=True, align=True)[0])
dict(
a ='edf',
bc='ghi',
)
"""
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
kwargs['_root_info']['depth'] += 1
stritems = kwargs.pop('si', kwargs.pop('stritems', False))
if stritems:
kwargs['strkeys'] = True
kwargs['strvals'] = True
kwargs['strkeys'] = kwargs.pop('sk', kwargs.pop('strkeys', False))
kwargs['strvals'] = kwargs.pop('sv', kwargs.pop('strvals', False))
newlines = kwargs.pop('nl', kwargs.pop('newlines', True))
kwargs['nl'] = _rectify_countdown_or_bool(newlines)
nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
# kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
# Doesn't actually put in trailing comma if on same line
trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0))
explicit = kwargs.get('explicit', False)
itemsep = kwargs.get('itemsep', ' ')
align = kwargs.get('align', False)
if align and not isinstance(align, string_types):
kvsep = kwargs.get('kvsep', ': ')
if kwargs.get('explicit', False):
kvsep = '='
align = kvsep
if len(dict_) == 0:
retstr = 'dict()' if explicit else '{}'
_leaf_info = None
else:
itemstrs, _leaf_info = _dict_itemstrs(dict_, **kwargs)
if nobraces:
lbr, rbr = '', ''
elif explicit:
lbr, rbr = 'dict(', ')'
else:
lbr, rbr = '{', '}'
retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr, align)
return retstr, _leaf_info
def _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr, align=False):
"""
Joins string-ified items with separators newlines and container-braces.
"""
# positive newlines means start counting from the root
use_newline = newlines > 0
# negative countdown values mean start counting from the leafs
# if compact_brace < 0:
# compact_brace = (-compact_brace) >= _leaf_info['max_height']
if newlines < 0:
use_newline = (-newlines) < _leaf_info['max_height']
if use_newline:
sep = ',\n'
if nobraces:
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = body_str
else:
if compact_brace:
# Why must we modify the indentation below and not here?
# prefix = ''
# rest = [ub.indent(s, prefix) for s in itemstrs[1:]]
# indented = itemstrs[0:1] + rest
indented = itemstrs
else:
import ubelt as ub
prefix = ' ' * 4
indented = [ub.indent(s, prefix) for s in itemstrs]
if align:
indented = _align_lines(indented, character=align)
body_str = sep.join(indented)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
if compact_brace:
# Why can we modify the indentation here but not above?
braced_body_str = (lbr + body_str.replace('\n', '\n ') + rbr)
else:
braced_body_str = (lbr + '\n' + body_str + '\n' + rbr)
retstr = braced_body_str
else:
sep = ',' + itemsep
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = (lbr + body_str + rbr)
return retstr
def _dict_itemstrs(dict_, **kwargs):
"""
Create a string representation for each item in a dict.
Args:
dict_ (dict): the dict
**kwargs: explicit, precision, kvsep, strkeys, _return_info, cbr,
compact_brace, sort
Ignore:
from ubelt.util_format import _dict_itemstrs
import xinspect
print(', '.join(xinspect.get_kwargs(_dict_itemstrs, max_depth=0).keys()))
Example:
>>> from ubelt.util_format import *
>>> dict_ = {'b': .1, 'l': 'st', 'g': 1.0, 's': 10, 'm': 0.9, 'w': .5}
>>> kwargs = {'strkeys': True}
>>> itemstrs, _ = _dict_itemstrs(dict_, **kwargs)
>>> char_order = [p[0] for p in itemstrs]
>>> assert char_order == ['b', 'g', 'l', 'm', 's', 'w']
"""
import ubelt as ub
explicit = kwargs.get('explicit', False)
kwargs['explicit'] = _rectify_countdown_or_bool(explicit)
precision = kwargs.get('precision', None)
kvsep = kwargs.get('kvsep', ': ')
if explicit:
kvsep = '='
def make_item_str(key, val):
if explicit or kwargs.get('strkeys', False):
key_str = text_type(key)
else:
key_str = repr2(key, precision=precision, newlines=0)
prefix = key_str + kvsep
kwargs['_return_info'] = True
val_str, _leaf_info = repr2(val, **kwargs)
# If the first line does not end with an open nest char
# (e.g. for ndarrays), otherwise we need to worry about
# residual indentation.
pos = val_str.find('\n')
first_line = val_str if pos == -1 else val_str[:pos]
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
if compact_brace or not first_line.rstrip().endswith(tuple('([{<')):
rest = '' if pos == -1 else val_str[pos:]
# val_str = first_line.lstrip() + rest
val_str = first_line + rest
if '\n' in prefix:
# Fix issue with keys that span new lines
item_str = prefix + val_str
else:
item_str = ub.hzcat([prefix, val_str])
else:
item_str = prefix + val_str
return item_str, _leaf_info
items = list(iteritems(dict_))
_tups = [make_item_str(key, val) for (key, val) in items]
itemstrs = [t[0] for t in _tups]
max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
_leaf_info = {
'max_height': max_height + 1,
}
sort = kwargs.get('sort', None)
if sort is None:
# if sort is None, force orderings on unordered collections like dicts,
# but keep ordering of ordered collections like OrderedDicts.
sort = True
if isinstance(dict_, collections.OrderedDict):
# never sort ordered dicts; they are perfect just the way they are!
sort = False
if sort:
key = sort if callable(sort) else None
itemstrs = _sort_itemstrs(items, itemstrs, key)
return itemstrs, _leaf_info
def _list_itemstrs(list_, **kwargs):
"""
Create a string representation for each item in a list.
Args:
list_ (Sequence):
**kwargs: _return_info, sort
"""
items = list(list_)
kwargs['_return_info'] = True
_tups = [repr2(item, **kwargs) for item in items]
itemstrs = [t[0] for t in _tups]
max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
_leaf_info = {
'max_height': max_height + 1,
}
sort = kwargs.get('sort', None)
if sort is None:
# if sort is None, force orderings on unordered collections like sets,
# but keep ordering of ordered collections like lists.
sort = isinstance(list_, (set, frozenset))
if sort:
key = sort if callable(sort) else None
itemstrs = _sort_itemstrs(items, itemstrs, key)
return itemstrs, _leaf_info
def _sort_itemstrs(items, itemstrs, key=None):
"""
Equivalent to ``sorted(items)`` except if ``items`` are unorderable, then
string values are used to define an ordering.
"""
# First try to sort items by their normal values
# If that doesn't work, then sort by their string values
import ubelt as ub
try:
# Set ordering is not unique. Sort by string values instead.
if len(items) > 0 and isinstance(items[0], (set, frozenset)):
raise TypeError
sortx = ub.argsort(items, key=key)
except TypeError:
sortx = ub.argsort(itemstrs, key=key)
itemstrs = [itemstrs[x] for x in sortx]
return itemstrs
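# Illustration of the fallback above (hypothetical values): for
# items = [{2, 3}, {1}] the TypeError branch is taken, so the already-rendered
# strings are sorted instead and the result comes back as ['{1}', '{2, 3}'].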
def _rectify_countdown_or_bool(count_or_bool):
"""
Used by recursive functions to specify at which level a bool is turned on.
Counting down yields True, True, ..., False.
Counting up yields False, False, False, ..., True.
Args:
count_or_bool (bool | int): if positive and an integer, it will count
down, otherwise it will remain the same.
Returns:
int or bool: count_or_bool_
Example:
>>> from ubelt.util_format import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> a8 = (_rectify_countdown_or_bool(None))
>>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
>>> print(result)
[1, 0, 0, -1, -2, True, False, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
elif count_or_bool > 0:
count_or_bool_ = count_or_bool - 1
else:
# We dont countup negatives anymore
count_or_bool_ = count_or_bool
else:
count_or_bool_ = False
return count_or_bool_
def _align_text(text, character='=', replchar=None, pos=0):
r"""
Left justifies text on the left side of character
Args:
text (str): text to align
character (str): character to align at
replchar (str): replacement character (default=None)
Returns:
str: new_text
Example:
>>> character = '='
>>> text = 'a = b=\none = two\nthree = fish\n'
>>> print(text)
>>> result = (_align_text(text, '='))
>>> print(result)
a = b=
one = two
three = fish
"""
line_list = text.splitlines()
new_lines = _align_lines(line_list, character, replchar, pos=pos)
new_text = '\n'.join(new_lines)
return new_text
def _align_lines(line_list, character='=', replchar=None, pos=0):
r"""
Left justifies text on the left side of character
Args:
line_list (list of strs):
character (str):
pos (int or list or None): does one alignment for all chars beyond this
column position. If pos is None, then all chars are aligned.
Returns:
list: new_lines
Example:
>>> line_list = 'a = b\none = two\nthree = fish'.split('\n')
>>> character = '='
>>> new_lines = _align_lines(line_list, character)
>>> result = ('\n'.join(new_lines))
>>> print(result)
a = b
one = two
three = fish
Example:
>>> line_list = 'foofish:\n a = b\n one = two\n three = fish'.split('\n')
>>> character = '='
>>> new_lines = _align_lines(line_list, character)
>>> result = ('\n'.join(new_lines))
>>> print(result)
foofish:
a = b
one = two
three = fish
Example:
>>> import ubelt as ub
>>> character = ':'
>>> text = ub.codeblock('''
{'max': '1970/01/01 02:30:13',
'mean': '1970/01/01 01:10:15',
'min': '1970/01/01 00:01:41',
'range': '2:28:32',
'std': '1:13:57',}''').split('\n')
>>> new_lines = _align_lines(text, ':', ' :')
>>> result = '\n'.join(new_lines)
>>> print(result)
{'max' : '1970/01/01 02:30:13',
'mean' : '1970/01/01 01:10:15',
'min' : '1970/01/01 00:01:41',
'range' : '2:28:32',
'std' : '1:13:57',}
Example:
>>> line_list = 'foofish:\n a = b = c\n one = two = three\nthree=4= fish'.split('\n')
>>> character = '='
>>> # align the second occurrence of a character
>>> new_lines = _align_lines(line_list, character, pos=None)
>>> print(('\n'.join(line_list)))
>>> result = ('\n'.join(new_lines))
>>> print(result)
foofish:
a = b = c
one = two = three
three=4 = fish
"""
import re
# FIXME: continue to fix ansi
if pos is None:
# Align all occurrences
num_pos = max([line.count(character) for line in line_list])
pos = list(range(num_pos))
# Allow multiple alignments
if isinstance(pos, list):
pos_list = pos
# recursive calls
new_lines = line_list
for pos in pos_list:
new_lines = _align_lines(new_lines, character=character,
replchar=replchar, pos=pos)
return new_lines
# base case
if replchar is None:
replchar = character
# the pos-th character to align
lpos = pos
rpos = lpos + 1
tup_list = [line.split(character) for line in line_list]
handle_ansi = True
if handle_ansi: # nocover
# Remove ansi from length calculation
# References: http://stackoverflow.com/questions/14693701remove-ansi
ansi_escape = re.compile(r'\x1b[^m]*m')
# Find how much padding is needed
maxlen = 0
for tup in tup_list:
if len(tup) >= rpos + 1:
if handle_ansi: # nocover
tup = [ansi_escape.sub('', x) for x in tup]
left_lenlist = list(map(len, tup[0:rpos]))
left_len = sum(left_lenlist) + lpos * len(replchar)
maxlen = max(maxlen, left_len)
# Pad each line to align the pos-th occurrence of the chosen character
new_lines = []
for tup in tup_list:
if len(tup) >= rpos + 1:
lhs = character.join(tup[0:rpos])
rhs = character.join(tup[rpos:])
# pad the new line with requested justification
newline = lhs.ljust(maxlen) + replchar + rhs
new_lines.append(newline)
else:
new_lines.append(replchar.join(tup))
return new_lines
if __name__ == '__main__':
"""
CommandLine:
python ~/code/ubelt/ubelt/util_format.py all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| apache-2.0 |
phoebe-project/phoebe2-docs | 2.0/tutorials/ecc.py | 1 | 2393 | #!/usr/bin/env python
# coding: utf-8
# Eccentricity (Volume Conservation)
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Relevant Parameters
# ----------------------------
#
# In[3]:
print b.get(qualifier='ecc')
# In[4]:
print b.get(qualifier='ecosw', context='component')
# In[5]:
print b.get(qualifier='esinw', context='component')
# Relevant Constraints
# -----------------------------
# In[6]:
b.filter(qualifier='pot', context='constraint')
# In[7]:
print b.get(qualifier='pot', component='primary', context='constraint')
# In[8]:
print b.get(qualifier='ecosw', context='constraint')
# In[9]:
print b.get(qualifier='esinw', context='constraint')
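# (Editor's note: the short sketch below is an illustrative addition, not part
# of the original notebook.) Because 'ecosw' and 'esinw' are constrained
# parameters, PHOEBE recomputes them automatically whenever 'ecc' (or 'per0')
# changes; uncommenting the lines below -- which only reuse calls already shown
# in this tutorial -- would print the updated constrained values.
#b.set_value('ecc', 0.1)
#print b.get(qualifier='ecosw', context='component')
#print b.get(qualifier='esinw', context='component')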
# Influence on Meshes (potentials, volumes)
# ----------------------------
#
# In[10]:
b.add_dataset('mesh', times=np.linspace(0,1,11))
# In[11]:
b.set_value('ecc', 0.2)
# In[12]:
b.run_compute()
# In[13]:
print b['pot@primary@model']
# In[14]:
ax, artists = b['mesh01'].plot(x='times', y='pot')
# In[15]:
print b['rpole@primary@model']
# In[16]:
axs, artists = b['mesh01'].plot(x='times', y='rpole')
# In[17]:
print b['volume@primary@model']
# In[18]:
ax, artists = b['mesh01'].plot(x='times', y='volume')
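# (Editor's note: illustrative sketch, not part of the original notebook.) The
# plots above are the point of this tutorial: with a nonzero eccentricity the
# potential varies around the orbit while the stellar volume stays (nearly)
# constant -- the "volume conservation" in the title. Re-running at a larger
# eccentricity, using only calls already shown above, would make the contrast
# clearer:
#b.set_value('ecc', 0.4)
#b.run_compute()
#ax, artists = b['mesh01'].plot(x='times', y='volume')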
# In[19]:
b.remove_dataset('mesh01')
# Influence on Radial Velocities
# ----------------------------------
#
# In[20]:
b.add_dataset('rv', times=np.linspace(0,1,51))
# In[21]:
b.run_compute()
# In[22]:
axs, artists = b.plot()
# In[23]:
b.remove_dataset('rv01')
# Influence on Light Curves (fluxes)
# -----------------------------------------
#
# In[24]:
b.add_dataset('lc', times=np.linspace(0,1,51))
# In[25]:
b.run_compute()
# In[26]:
axs, artists = b.plot()
| gpl-3.0 |
sgould/fun_and_games | plot_citations.py | 1 | 4095 | #!/usr/bin/env python
#
# Script to extract and plot Google Scholar citations. Useful for reporting track record in grant applications, etc.
#
import urllib.request
from html.parser import HTMLParser
import re
import seaborn as sea
import matplotlib.pyplot as plt
sea.set(font_scale=1.2)
from datetime import datetime
import numpy as np
class GoogleScholarHTMLParser(HTMLParser):
"""Parser to extract citation information from Google Scholar page."""
def __init__(self):
super(GoogleScholarHTMLParser, self).__init__()
self.insideSpanClass = []
self.insideTdClass = []
self.citeYears = []
self.citeCounts = []
self.citeSummaryData = []
@staticmethod
def get_attr(attrs, key='class'):
for (k, v) in attrs:
if k == key:
return v
return None
def handle_starttag(self, tag, attrs):
if (tag == 'span'):
self.insideSpanClass.append(self.get_attr(attrs, 'class'))
if (tag == 'td'):
self.insideTdClass.append(self.get_attr(attrs, 'class'))
def handle_data(self, data):
if len(self.insideSpanClass) > 0:
if self.insideSpanClass[-1] == 'gsc_g_t':
self.citeYears.append(int(data.strip()))
if self.insideSpanClass[-1] == 'gsc_g_al':
self.citeCounts.append(int(data.strip()))
if len(self.insideTdClass) > 0:
if self.insideTdClass[-1] == 'gsc_rsb_std':
self.citeSummaryData.append(int(data.strip()))
def handle_endtag(self, tag):
if (tag == 'span'):
self.insideSpanClass.pop()
if (tag == 'td'):
self.insideTdClass.pop()
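# (Editor's sketch, not part of the original script.) Minimal illustration of
# how GoogleScholarHTMLParser consumes Scholar-style markup; the HTML fragment
# and the function name below are hypothetical.
def _example_parse_citation_chart():
    """Feed a tiny hand-written HTML fragment through the parser above."""
    sample_html = (
        '<span class="gsc_g_t">2019</span>'      # a year tick on the chart
        '<span class="gsc_g_al">12</span>'       # citation count for that year
        '<td class="gsc_rsb_std">345</td>'       # a summary-table cell
    )
    parser = GoogleScholarHTMLParser()
    parser.feed(sample_html)
    # Expected: citeYears == [2019], citeCounts == [12], citeSummaryData == [345]
    return parser.citeYears, parser.citeCounts, parser.citeSummaryData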
def bar_plot_with_trend(years, counts, prediction=None, width=0.8):
"""Generate a bar plot showing trend line and optional prediction."""
if prediction is not None:
plt.bar(years[-1], prediction, width, align='center', color=[1.0, 1.0, 1.0])
plt.plot([year for year in years[-2:]], [counts[-2], prediction], 'ko--', lw=2)
plt.bar(years, counts, width, color=[0.75, 0.75, 0.75])
plt.plot(years, counts, 'ko-', lw=2)
plt.xticks(years, years)
if __name__ == "__main__":
# default Google Scholar page to process
URL = r"https://scholar.google.com.au/citations?user=YvdzeM8AAAAJ&hl=en"
# request user input for Google Scholar URL
import tkinter as tk
from tkinter import simpledialog
app_wnd = tk.Tk()
app_wnd.withdraw()
URL = simpledialog.askstring("Plot Citations", "Enter the full URL for the Google Scholar page you wish to plot:",
initialvalue=URL, parent=app_wnd)
if URL is None:
exit(0)
# fetch Google Scholar page and extract statistics
print("Fetching Google Scholar page...")
response = urllib.request.urlopen(URL)
html = str(response.read())
response.close()
html = re.sub("\\\\t|\\\\r\\\\n", "", html)
print("Parsing HTML...")
parser = GoogleScholarHTMLParser()
parser.feed(html)
print("...{} total citations".format(parser.citeSummaryData[0]))
#print(parser.citeCounts)
year_fraction = datetime.now().timetuple().tm_yday / 365.0
if datetime.now().timetuple().tm_year > parser.citeYears[-1]:
print("{:0.1f}% of year with no data for this year".format(100.0 * year_fraction))
year_prediction = None
else:
year_prediction = int(parser.citeCounts[-1] / year_fraction)
print("{:0.1f}% of year with {} citations ({} predicted)".format(100.0 * year_fraction, parser.citeCounts[-1], year_prediction))
print("Plotting Citations...")
plt.figure()
plt.subplot(2, 1, 1)
bar_plot_with_trend(parser.citeYears, parser.citeCounts, year_prediction)
plt.xlabel('Year'); plt.ylabel('Citations per Year')
plt.subplot(2, 1, 2)
counts = np.cumsum(parser.citeCounts)
cumulative_prediction = None if year_prediction is None else year_prediction + counts[-2]
bar_plot_with_trend(parser.citeYears, counts, cumulative_prediction)
plt.xlabel('Year'); plt.ylabel('Total Citations')
plt.tight_layout(pad=0.2)
plt.show()
| mit |
followyourheart/SFrame | oss_src/unity/python/sframe/test/test_sarray.py | 8 | 67699 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from ..data_structures.sarray import SArray
from ..util.timezone import GMT
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import util
import time
import itertools
import warnings
import functools
import tempfile
import sys
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArrayTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.bool_data = [x % 2 == 0 for x in range(10)]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None]
self.datetime_data2 = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111).replace(tzinfo=GMT(0.0)),None]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_creation(self, data, dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
s = SArray(data, dtype)
self.__test_equal(s, expected, dtype)
s = SArray(pd.Series(data), dtype)
self.__test_equal(s, expected, dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
s = SArray(data)
self.__test_equal(s, expected, expected_dtype)
s = SArray(pd.Series(data))
self.__test_equal(s, expected, expected_dtype)
def test_creation(self):
self.__test_creation(self.int_data, int, self.int_data)
self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
self.__test_creation(self.float_data, float, self.float_data)
self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
self.__test_creation(self.string_data, str, self.string_data)
self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(SArray(self.url, str), expected_output, str)
self.__test_creation(self.vec_data, array.array, self.vec_data)
self.__test_creation(self.list_data, list, self.list_data)
self.__test_creation(self.dict_data, dict, self.dict_data)
# test with type inference
self.__test_creation_type_inference(self.int_data, int, self.int_data)
self.__test_creation_type_inference(self.float_data, float, self.float_data)
self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
self.__test_creation_type_inference(self.string_data, str, self.string_data)
self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
self.__test_creation((1,2,3,4), int, [1,2,3,4])
def test_list_with_none_creation(self):
tlist=[[2,3,4],[5,6],[4,5,10,None]]
g=SArray(tlist)
self.assertEqual(len(g), len(tlist))
for i in range(len(tlist)):
self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
import array
t = array.array('d',[1.1,2,3,4,5.5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), float)
glist = list(g)
for i in range(len(glist)):
self.assertAlmostEqual(glist[i], t[i])
t = array.array('i',[1,2,3,4,5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), int)
glist = list(g)
for i in range(len(glist)):
self.assertEqual(glist[i], t[i])
def test_in(self):
sint = SArray(self.int_data, int)
self.assertTrue(5 in sint)
self.assertFalse(20 in sint)
sstr = SArray(self.string_data, str)
self.assertTrue("abc" in sstr)
self.assertFalse("zzzzzz" in sstr)
self.assertFalse("" in sstr)
self.__test_equal(sstr.contains("ll"), ["ll" in i for i in self.string_data], int)
self.__test_equal(sstr.contains("a"), ["a" in i for i in self.string_data], int)
def test_save_load(self):
# Make sure these files don't exist before testing
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
sint = SArray(self.int_data, int)
sflt = SArray([float(x) for x in self.int_data], float)
sstr = SArray([str(x) for x in self.int_data], str)
svec = SArray(self.vec_data, array.array)
slist = SArray(self.list_data, list)
sdict = SArray(self.dict_data, dict)
sint.save('intarr.sidx')
sflt.save('fltarr.sidx')
sstr.save('strarr.sidx')
svec.save('vecarr.sidx')
slist.save('listarr.sidx')
sdict.save('dictarr.sidx')
sint2 = SArray('intarr.sidx')
sflt2 = SArray('fltarr.sidx')
sstr2 = SArray('strarr.sidx')
svec2 = SArray('vecarr.sidx')
slist2 = SArray('listarr.sidx')
sdict2 = SArray('dictarr.sidx')
self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
self.__test_equal(sint2, self.int_data, int)
self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
self.__test_equal(svec2, self.vec_data, array.array)
self.__test_equal(slist2, self.list_data, list)
self.__test_equal(sdict2, self.dict_data, dict)
# Bad permission
# Windows has a way more ridiculous way of setting permissions. I'm
# sure windows will stop us from writing if we don't have
# permission...probably no reason to test
if sys.platform != 'win32':
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sint.save(os.path.join(test_dir, 'bad.sidx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sint3 = SArray(os.path.join(test_dir, 'bad.sidx'))
os.removedirs(test_dir)
#cleanup
del sint2
del sflt2
del sstr2
del svec2
del slist2
del sdict2
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
def test_save_load_text(self):
self._remove_single_file('txt_int_arr.txt')
sint = SArray(self.int_data, int)
sint.save('txt_int_arr.txt')
self.assertTrue(os.path.exists('txt_int_arr.txt'))
f = open('txt_int_arr.txt')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr.txt')
self._remove_single_file('txt_int_arr')
sint.save('txt_int_arr', format='text')
self.assertTrue(os.path.exists('txt_int_arr'))
f = open('txt_int_arr')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
try:
os.remove(filename)
except:
pass
def _remove_sarray_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
shutil.rmtree(f)
def test_transform(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
# Test randomness across segments; a randomized SArray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
# test transform with missing values
sa = SArray([1,2,3,None,4,5])
sa1 = sa.apply(lambda x : x + 1)
self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
sa2_int = sa_int.apply(lambda val: val + 1, int)
expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
sa_char = SArray(['a' for i in xrange(10000)], str)
# # type mismatch exception
self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char))
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_bool, expected_output, int)
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
# Test randomness across segments; a randomized SArray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
sa_int = SArray(self.int_data, int)
sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
expected = [[i, i + 1, str(i)] for i in self.int_data]
self.__test_equal(sa_vec2, expected, list)
sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
self.__test_equal(sa_int_again, self.int_data, int)
# transform from vector to vector
sa_vec = SArray(self.vec_data, array.array)
sa_vec2 = sa_vec.apply(lambda x: x)
self.__test_equal(sa_vec2, self.vec_data, array.array)
# transform on list
sa_list = SArray(self.list_data, list)
sa_list2 = sa_list.apply(lambda x: x)
self.__test_equal(sa_list2, self.list_data, list)
# transform dict to list
sa_dict = SArray(self.dict_data, dict)
sa_list = sa_dict.apply(lambda x: x.keys())
self.__test_equal(sa_list, [x.keys() for x in self.dict_data], list)
def test_transform_dict(self):
# lambda accesses dict
sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
sa_bool_r = sa_dict.apply(lambda x: x.has_key('a') if x != None else None, skip_undefined=False)
expected_output = [1, 0, 0, None]
self.__test_equal(sa_bool_r, expected_output, int)
# lambda returns dict
expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.apply(lambda x: x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
data = [{'a':1}, {1:2}, None, {'c': 'a'}]
expected_output = [{'a':1}]
sa_dict = SArray(expected_output, dict)
ret = sa_dict.filter(lambda x: x.has_key('a'))
self.__test_equal(ret, expected_output, dict)
# try second time to make sure the lambda system still works
expected_output = [{1:2}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.filter(lambda x: x.has_key(1))
self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
# test empty
s = SArray([], float)
no_change = s.filter(lambda x : x == 0)
self.assertEqual(no_change.size(), 0)
# test normal case
s = SArray(self.int_data, int)
middle_of_array = s.filter(lambda x: x > 3 and x < 8)
self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
# test normal string case
s = SArray(self.string_data, str)
exp_val_list = [x for x in self.string_data if x != 'world']
# Remove all words whose second letter is not in the first half of the alphabet
second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
self.assertEqual(list(second_letter.head(10)), exp_val_list)
# test not-a-lambda
def a_filter_func(x):
return ((x > 4.4) and (x < 6.8))
s = SArray(self.int_data, float)
another = s.filter(a_filter_func)
self.assertEqual(list(another.head(10)), [5.,6.])
sa = SArray(self.float_data)
# filter by self
sa2 = sa[sa]
self.assertEquals(list(sa.head(10)), list(sa2.head(10)))
# filter by zeros
sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
sa2 = sa[sa_filter]
self.assertEquals(len(sa2), 0)
# filter by wrong size
sa_filter = SArray([0,2,5])
with self.assertRaises(IndexError):
sa2 = sa[sa_filter]
def test_any_all(self):
s = SArray([0,1,2,3,4,5,6,7,8,9], int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
s = SArray([0,0,0,0,0], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray(self.string_data, str)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
s = SArray(self.int_data, int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
# test empty
s = SArray([], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), True)
s = SArray([[], []], array.array)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray([[],[1.0]], array.array)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
def test_astype(self):
# test empty
s = SArray([], int)
as_out = s.astype(float)
self.assertEqual(as_out.dtype(), float)
# test float -> int
s = SArray(map(lambda x: x+0.2, self.float_data), float)
as_out = s.astype(int)
self.assertEqual(list(as_out.head(10)), self.int_data)
# test int->string
s = SArray(self.int_data, int)
as_out = s.astype(str)
self.assertEqual(list(as_out.head(10)), map(lambda x: str(x), self.int_data))
i_out = as_out.astype(int)
self.assertEqual(list(i_out.head(10)), list(s.head(10)))
s = SArray(self.vec_data, array.array)
with self.assertRaises(RuntimeError):
s.astype(int)
with self.assertRaises(RuntimeError):
s.astype(float)
s = SArray(["a","1","2","3"])
with self.assertRaises(RuntimeError):
s.astype(int)
self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
s = SArray(["[1 2 3]","[4;5]"])
ret = list(s.astype(array.array).head(2))
self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
s = SArray(["[1,\"b\",3]","[4,5]"])
ret = list(s.astype(list).head(2))
self.assertEqual(ret, [[1,"b",3],[4,5]])
s = SArray(["{\"a\":2,\"b\":3}","{}"])
ret = list(s.astype(dict).head(2))
self.assertEqual(ret, [{"a":2,"b":3},{}])
s = SArray(["[1abc]"])
ret = list(s.astype(list).head(1))
self.assertEqual(ret, [["1abc"]])
s = SArray(["{1xyz:1a,2b:2}"])
ret = list(s.astype(dict).head(1))
self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
def test_clip(self):
# invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.clip(25,26)
with self.assertRaises(RuntimeError):
s.clip_lower(25)
with self.assertRaises(RuntimeError):
s.clip_upper(26)
# int w/ int, test lower and upper functions too
# int w/float, no change
s = SArray(self.int_data, int)
clip_out = s.clip(3,7).head(10)
# test that our list isn't cast to float if nothing happened
clip_out_nc = s.clip(0.2, 10.2).head(10)
lclip_out = s.clip_lower(3).head(10)
rclip_out = s.clip_upper(7).head(10)
self.assertEqual(len(clip_out), len(self.int_data))
self.assertEqual(len(lclip_out), len(self.int_data))
self.assertEqual(len(rclip_out), len(self.int_data))
for i in range(0,len(clip_out)):
if i < 2:
self.assertEqual(clip_out[i], 3)
self.assertEqual(lclip_out[i], 3)
self.assertEqual(rclip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
elif i > 6:
self.assertEqual(clip_out[i], 7)
self.assertEqual(lclip_out[i], self.int_data[i])
self.assertEqual(rclip_out[i], 7)
self.assertEqual(clip_out_nc[i], self.int_data[i])
else:
self.assertEqual(clip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
# int w/float, change
# float w/int
# float w/float
clip_out = s.clip(2.8, 7.2).head(10)
fs = SArray(self.float_data, float)
ficlip_out = fs.clip(3, 7).head(10)
ffclip_out = fs.clip(2.8, 7.2).head(10)
for i in range(0,len(clip_out)):
if i < 2:
self.assertAlmostEqual(clip_out[i], 2.8)
self.assertAlmostEqual(ffclip_out[i], 2.8)
self.assertAlmostEqual(ficlip_out[i], 3.)
elif i > 6:
self.assertAlmostEqual(clip_out[i], 7.2)
self.assertAlmostEqual(ffclip_out[i], 7.2)
self.assertAlmostEqual(ficlip_out[i], 7.)
else:
self.assertAlmostEqual(clip_out[i], self.float_data[i])
self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
vs = SArray(self.vec_data, array.array);
clipvs = vs.clip(3, 7).head(100)
self.assertEqual(len(clipvs), len(self.vec_data));
for i in range(0, len(clipvs)):
a = clipvs[i]
b = self.vec_data[i]
self.assertEqual(len(a), len(b))
for j in range(0, len(b)):
if b[j] < 3:
b[j] = 3
elif b[j] > 7:
b[j] = 7
self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.num_missing(), 1)
def test_nonzero(self):
# test empty
s = SArray([],int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test all nonzero
s = SArray(self.float_data, float)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.float_data))
# test all zero
s = SArray([0 for x in range(0,10)], int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test strings
str_list = copy.deepcopy(self.string_data)
str_list.append("")
s = SArray(str_list, str)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.string_data))
def test_std_var(self):
# test empty
s = SArray([], int)
self.assertTrue(s.std() is None)
self.assertTrue(s.var() is None)
# increasing ints
s = SArray(self.int_data, int)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# increasing floats
s = SArray(self.float_data, float)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# vary ddof
self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
self.assertAlmostEqual(s.var(ddof=6), 20.625)
self.assertAlmostEqual(s.var(ddof=9), 82.5)
self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
# bad ddof
with self.assertRaises(RuntimeError):
s.var(ddof=11)
with self.assertRaises(RuntimeError):
s.std(ddof=11)
# bad type
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.std()
with self.assertRaises(RuntimeError):
s.var()
# overflow test
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
# test empty
s = SArray([], int)
self.assertEqual(len(s.tail()), 0)
# test standard tail
s = SArray([x for x in range(0,40)], int)
self.assertEqual(s.tail(), [x for x in range(30,40)])
# smaller amount
self.assertEqual(s.tail(3), [x for x in range(37,40)])
# larger amount
self.assertEqual(s.tail(40), [x for x in range(0,40)])
# too large
self.assertEqual(s.tail(81), [x for x in range(0,40)])
def test_max_min_sum_mean(self):
# negative and positive
s = SArray([-2,-1,0,1,2], int)
self.assertEqual(s.max(), 2)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), 0)
self.assertAlmostEqual(s.mean(), 0.)
# test valid and invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.max()
with self.assertRaises(RuntimeError):
s.min()
with self.assertRaises(RuntimeError):
s.sum()
with self.assertRaises(RuntimeError):
s.mean()
s = SArray(self.int_data, int)
self.assertEqual(s.max(), 10)
self.assertEqual(s.min(), 1)
self.assertEqual(s.sum(), 55)
self.assertAlmostEqual(s.mean(), 5.5)
s = SArray(self.float_data, float)
self.assertEqual(s.max(), 10.)
self.assertEqual(s.min(), 1.)
self.assertEqual(s.sum(), 55.)
self.assertAlmostEqual(s.mean(), 5.5)
# test all negative
s = SArray(map(lambda x: x*-1, self.int_data), int)
self.assertEqual(s.max(), -1)
self.assertEqual(s.min(), -10)
self.assertEqual(s.sum(), -55)
self.assertAlmostEqual(s.mean(), -5.5)
# test empty
s = SArray([], float)
self.assertTrue(s.max() is None)
self.assertTrue(s.min() is None)
self.assertTrue(s.sum() is None)
self.assertTrue(s.mean() is None)
# test big ints
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertEqual(s.max(), huge_int)
self.assertEqual(s.min(), 1)
# yes, we overflow
self.assertEqual(s.sum(), (huge_int+1)*-1)
# ...but not here
self.assertAlmostEqual(s.mean(), 4611686018427387904.)
a = SArray([[1,2],[1,2],[1,2]], array.array)
self.assertEqual(a.sum(), array.array('d', [3,6]))
self.assertEqual(a.mean(), array.array('d', [1,2]))
with self.assertRaises(RuntimeError):
a.max()
with self.assertRaises(RuntimeError):
a.min()
a = SArray([[1,2],[1,2],[1,2,3]], array.array)
with self.assertRaises(RuntimeError):
a.sum()
with self.assertRaises(RuntimeError):
a.mean()
def test_max_min_sum_mean_missing(self):
# negative and positive
s = SArray([-2,0,None,None,None], int)
self.assertEqual(s.max(), 0)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), -2)
self.assertAlmostEqual(s.mean(), -1)
s = SArray([None,None,None], int)
self.assertEqual(s.max(), None)
self.assertEqual(s.min(), None)
self.assertEqual(s.sum(), 0)
self.assertEqual(s.mean(), None)
def test_python_special_functions(self):
s = SArray([], int)
self.assertEqual(len(s), 0)
self.assertEqual(str(s), '[]')
self.assertEqual(bool(s), False)
# increasing ints
s = SArray(self.int_data, int)
self.assertEqual(len(s), len(self.int_data))
self.assertEqual(list(s), self.int_data)
self.assertEqual(bool(s), True)
realsum = sum(self.int_data)
sum1 = sum([x for x in s])
sum2 = s.sum()
sum3 = s.apply(lambda x:x, int).sum()
self.assertEquals(sum1, realsum)
self.assertEquals(sum2, realsum)
self.assertEquals(sum3, realsum)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(t % 5, list(s % 5), int)
self.__test_equal(t // 5, list(s // 5), int)
self.__test_equal(1.5 + t, list(1.5 + s), float)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0,0,0,0,1], int)
self.__test_equal(s != None, [1,1,1,1,0], int)
def test_vector_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
s2=np.array([5,4,3,2,1,10,9,8,7,6]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t + t2, list(s + s2), int)
self.__test_equal(t - t2, list(s - s2), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / t2, list(s.astype(float) / s2), float)
self.__test_equal(t * t2, list(s * s2), int)
self.__test_equal(t < t2, list(s < s2), int)
self.__test_equal(t > t2, list(s > s2), int)
self.__test_equal(t <= t2, list(s <= s2), int)
self.__test_equal(t >= t2, list(s >= s2), int)
self.__test_equal(t == t2, list(s == s2), int)
self.__test_equal(t != t2, list(s != s2), int)
s = SArray(self.vec_data, array.array)
self.__test_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
t = SArray(self.float_data, float)
self.__test_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
s = SArray([1,2,3,4,None])
self.assertTrue((s==s).all())
s = SArray([1,2,3,4,None])
self.assertFalse((s!=s).any())
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1]);
s2=np.array([0,1,0,1,0,1,0,1]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"];
s2=["e","d","c","b","a","j","i","h","g","f"];
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
t = SArray([1,2,3,4,None,6,7,8,9,None], float) # missing 4th and 9th
t2 = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
self.assertEquals(len((t + t2).dropna()), 7);
self.assertEquals(len((t - t2).dropna()), 7);
self.assertEquals(len((t * t2).dropna()), 7);
def test_dropna(self):
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
self.assertEquals(len(t.dropna()), 6)
self.assertEquals(list(t.dropna()), no_nas)
t2 = SArray([None,np.nan])
self.assertEquals(len(t2.dropna()), 0)
self.assertEquals(list(SArray(self.int_data).dropna()), self.int_data)
self.assertEquals(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
# fillna shouldn't fill anything
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
out = t.fillna('hello')
self.assertEquals(list(out), no_nas)
# Normal integer case (float auto casted to int)
t = SArray([53,23,None,np.nan,5])
self.assertEquals(list(t.fillna(-1.0)), [53,23,-1,-1,5])
# dict type
t = SArray(self.dict_data+[None])
self.assertEquals(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
# list type
t = SArray(self.list_data+[None])
self.assertEquals(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
# vec type
t = SArray(self.vec_data+[None])
self.assertEquals(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
# empty sarray
t = SArray()
self.assertEquals(len(t.fillna(0)), 0)
def test_sample(self):
sa = SArray(data=self.int_data)
sa_sample = sa.sample(.5, 9)
sa_sample2 = sa.sample(.5, 9)
self.assertEqual(sa_sample.head(), sa_sample2.head())
for i in sa_sample:
self.assertTrue(i in self.int_data)
with self.assertRaises(ValueError):
sa.sample(3)
sa_sample = SArray().sample(.5, 9)
self.assertEqual(len(sa_sample), 0)
def test_vector_slice(self):
d=[[1],[1,2],[1,2,3]]
g=SArray(d, array.array)
self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
g=SArray(self.vec_data, array.array);
self.__test_equal(g.vector_slice(0), self.float_data, float)
self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def _my_subslice(self, arr, start=None, stop=None, step=1):
return arr.apply(lambda x: x[slice(start, stop, step)], arr.dtype())
def _slice_equality_test(self, arr, start=None, stop=None, step=1):
self.assertEqual(
list(arr.subslice(start, stop, step)),
list(self._my_subslice(arr,start,stop,step)))
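# (Editor's note, illustrative only.) Given the helpers above, subslice is
# expected to behave like applying Python's slice element-wise, e.g.
# SArray(['abcd', 'ef']).subslice(0, 2) should match
# [s[0:2] for s in ['abcd', 'ef']] == ['ab', 'ef'].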
def test_subslice(self):
#string slicing
g=SArray(range(1,1000, 10)).astype(str)
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#list slicing
g=SArray(range(1,10)).apply(lambda x: list(range(x)), list)
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#array slicing
import array
g=SArray(range(1,10)).apply(lambda x: array.array('d', range(x)))
self._slice_equality_test(g, 0, 2);
self._slice_equality_test(g, 0, -1, 2);
self._slice_equality_test(g, -1, -3);
self._slice_equality_test(g, -1, -2, -1);
self._slice_equality_test(g, None, None, -1);
self._slice_equality_test(g, -100, -1);
#this should fail
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).subslice(1)
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).astype(float).subslice(1)
def test_lazy_eval(self):
sa = SArray(range(-10, 10))
sa = sa + 1
sa1 = sa >= 0
sa2 = sa <= 0
sa3 = sa[sa1 & sa2]
item_count = sa3.size()
self.assertEqual(item_count, 1)
def __test_append(self, data1, data2, dtype):
sa1 = SArray(data1, dtype)
sa2 = SArray(data2, dtype)
sa3 = sa1.append(sa2)
self.__test_equal(sa3, data1 + data2, dtype)
sa3 = sa2.append(sa1)
self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
n = len(self.int_data)
m = n / 2
self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
val1 = [i for i in range(1, 1000)]
val2 = [str(i) for i in range(-10, 1)]
sa1 = SArray(val1, int)
sa2 = SArray(val2, str)
with self.assertRaises(RuntimeError):
sa3 = sa1.append(sa2)
def test_word_count(self):
sa = SArray(["This is someurl http://someurl!!",
"中文 应该也 行",
'Сблъсъкът между'])
expected = [{"this": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
expected2 = [{"This": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
sa1 = sa._count_words()
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected, dict)
sa1 = sa._count_words(to_lower=False)
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected2, dict)
#should fail if the input type is not string
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
sa._count_words()
def test_word_count2(self):
sa = SArray(["This is some url http://www.someurl.com!!", "Should we? Yes, we should."])
#TODO: Get some weird unicode whitespace in the Chinese and Russian tests
expected1 = [{"this": 1, "is": 1, "some": 1, "url": 1, "http://www.someurl.com!!": 1},
{"should": 1, "we?": 1, "we": 1, "yes,": 1, "should.": 1}]
expected2 = [{"this is some url http://www.someurl.com": 1},
{"should we": 1, " yes": 1, " we should.": 1}]
word_counts1 = sa._count_words()
word_counts2 = sa._count_words(delimiters=["?", "!", ","])
self.assertEquals(word_counts1.dtype(), dict)
self.__test_equal(word_counts1, expected1, dict)
self.assertEquals(word_counts2.dtype(), dict)
self.__test_equal(word_counts2, expected2, dict)
def test_ngram_count(self):
sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
# Testing word n-gram functionality
result = sa_word._count_ngrams(3)
result2 = sa_word._count_ngrams(2)
result3 = sa_word._count_ngrams(3,"word", to_lower=False)
result4 = sa_word._count_ngrams(2,"word", to_lower=False)
expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
self.assertEquals(result.dtype(), dict)
self.__test_equal(result, expected, dict)
self.assertEquals(result2.dtype(), dict)
self.__test_equal(result2, expected2, dict)
self.assertEquals(result3.dtype(), dict)
self.__test_equal(result3, expected3, dict)
self.assertEquals(result4.dtype(), dict)
self.__test_equal(result4, expected4, dict)
#Testing character n-gram functionality
result5 = sa_character._count_ngrams(3, "character")
result6 = sa_character._count_ngrams(2, "character")
result7 = sa_character._count_ngrams(3, "character", to_lower=False)
result8 = sa_character._count_ngrams(2, "character", to_lower=False)
result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
self.assertEquals(result5.dtype(), dict)
self.__test_equal(result5, expected5, dict)
self.assertEquals(result6.dtype(), dict)
self.__test_equal(result6, expected6, dict)
self.assertEquals(result7.dtype(), dict)
self.__test_equal(result7, expected7, dict)
self.assertEquals(result8.dtype(), dict)
self.__test_equal(result8, expected8, dict)
self.assertEquals(result9.dtype(), dict)
self.__test_equal(result9, expected9, dict)
self.assertEquals(result10.dtype(), dict)
self.__test_equal(result10, expected10, dict)
self.assertEquals(result11.dtype(), dict)
self.__test_equal(result11, expected11, dict)
self.assertEquals(result12.dtype(), dict)
self.__test_equal(result12, expected12, dict)
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
#should fail if the input type is not string
sa._count_ngrams()
with self.assertRaises(TypeError):
#should fail if n is not of type 'int'
sa_word._count_ngrams(1.01)
with self.assertRaises(ValueError):
#should fail with invalid method
sa_word._count_ngrams(3,"bla")
with self.assertRaises(ValueError):
#should fail with n <0
sa_word._count_ngrams(0)
with warnings.catch_warnings(True) as context:
sa_word._count_ngrams(10)
assert len(context) == 1
def test_dict_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_keys = sa.dict_keys()
self.assertEquals(sa_keys, [str(i) for i in self.int_data])
# na value
d = [{'a': 1}, {None: 2}, {"b": None}, None]
sa = SArray(d)
sa_keys = sa.dict_keys()
self.assertEquals(sa_keys, [['a'], [None], ['b'], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_keys()
# empty SArray with type
sa = SArray([], dict)
self.assertEquals(list(sa.dict_keys().head(10)), [], list)
def test_dict_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_values = sa.dict_values()
self.assertEquals(sa_values, [[i, float(i)] for i in self.int_data])
# na value
d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
sa = SArray(d)
sa_values = sa.dict_values()
self.assertEquals(sa_values, [[1], ['str'], [None], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_values()
# empty SArray with type
sa = SArray([], dict)
self.assertEquals(list(sa.dict_values().head(10)), [], list)
def test_dict_trim_by_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_keys(['a', 'b'])
self.assertEquals(sa_values, [{}, {None: 'str'}, {"c": 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_keys([])
sa = SArray([], dict)
self.assertEquals(list(sa.dict_trim_by_keys([]).head(10)), [], list)
def test_dict_trim_by_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_values(5,10)
self.assertEquals(sa_values, [{'b': 20, 'c':None}, {None:5}, None])
# no upper key
sa_values = sa.dict_trim_by_values(2)
self.assertEquals(sa_values, [{'b': 20}, {"b": 4, None:5}, None])
# no param
sa_values = sa.dict_trim_by_values()
self.assertEquals(sa_values, [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
# no lower key
sa_values = sa.dict_trim_by_values(upper=7)
self.assertEquals(sa_values, [{'a':1, 'c':None}, {"b": 4, None: 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_values()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_trim_by_values().head(10)), [], list)
def test_dict_has_any_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_any_keys([])
self.assertEquals(sa_values, [0,0,0,0])
sa_values = sa.dict_has_any_keys(['a'])
self.assertEquals(sa_values, [1,0,0,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_any_keys("a")
self.assertEquals(sa_values, [1,0,0,1])
sa_values = sa.dict_has_any_keys(['a', 'b'])
self.assertEquals(sa_values, [1,1,0,1])
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_has_any_keys([]).head(10)), [], list)
def test_dict_has_all_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_all_keys([])
self.assertEquals(sa_values, [1,1,0,1])
sa_values = sa.dict_has_all_keys(['a'])
self.assertEquals(sa_values, [1,0,0,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_all_keys("a")
self.assertEquals(sa_values, [1,0,0,1])
sa_values = sa.dict_has_all_keys(['a', 'b'])
self.assertEquals(sa_values, [1,0,0,0])
sa_values = sa.dict_has_all_keys([None, "b"])
self.assertEquals(sa_values, [0,1,0,0])
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_has_all_keys([]).head(10)), [], list)
def test_save_load_cleanup_file(self):
# similarly for SArray
with util.TempDirectory() as f:
sa = SArray(range(1,1000000))
sa.save(f)
# 17 for each sarray, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# sa1 now references the on-disk file
sa1 = SArray(f);
# create another SArray and save to the same location
sa2 = SArray([str(i) for i in range(1,100000)])
sa2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# now sa1 should still be accessible
self.__test_equal(sa1, list(sa), int)
# and sa2 is correct too
sa3 = SArray(f)
self.__test_equal(sa3, list(sa2), str)
# when sa1 goes out of scope, the tmp files should be gone
sa1 = 1
time.sleep(1) # give the files time to be deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
test = SArray(list_to_compare + list_to_compare)
self.assertEquals(sorted(list(test.unique())), sorted(list_to_compare))
def test_unique(self):
# Test empty SArray
test = SArray([])
self.assertEquals(list(test.unique()), [])
# Test one value
test = SArray([1])
self.assertEquals(list(test.unique()), [1])
# Test many of one value
test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEquals(list(test.unique()), [1])
# Test all unique values
test = SArray(self.int_data)
self.assertEquals(sorted(list(test.unique())), self.int_data)
# Test an interesting sequence
interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
test = SArray(interesting_ints)
u = test.unique()
self.assertEquals(len(u), 13)
# We do not preserve order
self.assertEquals(sorted(list(u)), sorted(np.unique(interesting_ints)))
# Test other types
self.__generic_unique_test(self.string_data[0:6])
# only works reliably because these float values can be compared
# for equality exactly
self.__generic_unique_test(self.float_data)
self.__generic_unique_test(self.list_data)
self.__generic_unique_test(self.vec_data)
with self.assertRaises(TypeError):
SArray(self.dict_data).unique()
def test_item_len(self):
# empty SArray
test = SArray([])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
# wrong type
test = SArray([1,2,3])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
test = SArray(['1','2','3'])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
# vector type
test = SArray([[], [1], [1,2], [1,2,3], None])
item_length = test.item_length();
self.assertEquals(list(item_length), list([0, 1,2,3,None]))
# dict type
test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
self.assertEquals(list(test.item_length()), list([0, 1,2,None]))
# list type
test = SArray([[], [1,2], ['str', 'str2'], None])
self.assertEquals(list(test.item_length()), list([0, 2,2,None]))
def test_random_access(self):
t = list(range(0,100000))
s = SArray(t)
# simple slices
self.__test_equal(s[1:10000], t[1:10000], int)
self.__test_equal(s[0:10000:3], t[0:10000:3], int)
self.__test_equal(s[1:10000:3], t[1:10000:3], int)
self.__test_equal(s[2:10000:3], t[2:10000:3], int)
self.__test_equal(s[3:10000:101], t[3:10000:101], int)
# negative slices
self.__test_equal(s[-5:], t[-5:], int)
self.__test_equal(s[-1:], t[-1:], int)
self.__test_equal(s[-100:-10], t[-100:-10], int)
self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
# single element reads
self.assertEquals(s[511], t[511])
self.assertEquals(s[1912], t[1912])
self.assertEquals(s[-1], t[-1])
self.assertEquals(s[-10], t[-10])
# A cache boundary
self.assertEquals(s[32*1024-1], t[32*1024-1])
self.assertEquals(s[32*1024], t[32*1024])
# totally different
self.assertEquals(s[19312], t[19312])
# edge case oddities
self.__test_equal(s[10:100:100], t[10:100:100], int)
self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
self.__test_equal(s[-1:-2], t[-1:-2], int)
self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
with self.assertRaises(IndexError):
s[len(s)]
# with caching; these reads should be fast, since 32K
# elements are cached.
for i in range(0, 100000, 100):
self.assertEquals(s[i], t[i])
for i in range(0, 100000, 100):
self.assertEquals(s[-i], t[-i])
def test_sort(self):
test = SArray([1,2,3,5,1,4])
ascending = SArray([1,1,2,3,4,5])
descending = SArray([5,4,3,2,1,1])
result = test.sort()
self.assertEqual(result, ascending)
result = test.sort(ascending = False)
self.assertEqual(result, descending)
with self.assertRaises(TypeError):
SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
g=SArray([{'a':u'\u2019'}])
g=SArray([u'123',u'\u2019'])
g=SArray(['123',u'\u2019'])
def test_read_from_avro(self):
data = """Obj\x01\x04\x16avro.schema\xec\x05{"fields": [{"type": "string", "name": "business_id"}, {"type": "string", "name": "date"}, {"type": "string", "name": "review_id"}, {"type": "int", "name": "stars"}, {"type": "string", "name": "text"}, {"type": "string", "name": "type"}, {"type": "string", "name": "user_id"}, {"type": {"type": "map", "values": "int"}, "name": "votes"}], "type": "record", "name": "review"}\x14avro.codec\x08null\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb\x04\xe8 ,sgBl3UDEcNYKwuUb92CYdA\x142009-01-25,Zj-R0ZZqIKFx56LY2su1iQ\x08\x80\x19The owner of China King had never heard of Yelp...until Jim W rolled up on China King!\n\nThe owner of China King, Michael, is very friendly and chatty. Be Prepared to chat for a few minutes if you strike up a conversation.\n\nThe service here was terrific. We had several people fussing over us but the primary server, Maggie was a gem. \n\nMy wife and the kids opted for the Americanized menu and went with specials like sweet and sour chicken, shrimp in white sauce and garlic beef. Each came came with soup, egg roll and rice. I sampled the garlic beef which they prepared with a kung pao brown sauce (a decision Maggie and my wife arrived at after several minutes of discussion) it had a nice robust flavor and the veggies were fresh and flavorful. I also sampled the shrimp which were succulent and the white sauce had a little more distinctiveness to it than the same sauce at many Chinese restaurants.\n\nI ordered from the traditional menu but went not too adventurous with sizzling plate with scallops and shrimp in black pepper sauce. Very enjoyable. Again, succulent shrimp. The scallops were tasty as well. Realizing that I moved here from Boston and I go into any seafood experience with diminished expectations now that I live in the west, I have to say the scallops are among the fresher and judiciously prepared that I have had in Phoenix.\n\nOverall China King delivered a very tasty and very fresh meal. They have a fairly extensive traditional menu which I look forward to exploring further.\n\nThanks to Christine O for her review...after reading that I knew China King was A-OK.\x0creview,P2kVk4cIWyK4e4h14RhK-Q\x06\nfunny\x08\x0cuseful\x12\x08cool\x0e\x00,arKckMf7lGNYjXjKo6DXcA\x142012-05-05,EyVfhRDlyip2ErKMOHEA-A\x08\xa4\x04We\'ve been here a few times and we love all the fresh ingredients. The pizza is good when you eat it fresh but if you like to eat your pizza cold then you\'ll be biting into hard dough. Their Nutella pizza is good. Take a menu and check out their menu and hours for specials.\x0creview,x1Yl1dpNcWCCEdpME9dg0g\x06\nfunny\x02\x0cuseful\x02\x08cool\x00\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb"""
test_avro_file = open("test.avro", "wb")
test_avro_file.write(data)
test_avro_file.close()
sa = SArray.from_avro("test.avro")
self.assertEqual(sa.dtype(), dict)
self.assertEqual(len(sa), 2)
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10)]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), float)
def test_from_sequence(self):
with self.assertRaises(TypeError):
g = SArray.from_sequence()
g = SArray.from_sequence(100)
self.assertEqual(list(g), range(100))
g = SArray.from_sequence(10, 100)
self.assertEqual(list(g), range(10, 100))
g = SArray.from_sequence(100, 10)
self.assertEqual(list(g), range(100, 10))
def test_datetime(self):
sa = SArray(self.datetime_data)
self.__test_equal(sa ,self.datetime_data,dt.datetime)
sa = SArray(self.datetime_data2)
self.__test_equal(sa ,self.datetime_data2,dt.datetime)
ret = sa.split_datetime(limit=['year','month','day','hour','minute',
'second','us','weekday', 'isoweekday','tmweekday'])
self.assertEqual(ret.num_cols(), 10)
self.__test_equal(ret['X.year'] , [2013, 1902, None], int)
self.__test_equal(ret['X.month'] , [5, 10, None], int)
self.__test_equal(ret['X.day'] , [7, 21, None], int)
self.__test_equal(ret['X.hour'] , [10, 10, None], int)
self.__test_equal(ret['X.minute'] , [4, 34, None], int)
self.__test_equal(ret['X.second'] , [10, 10, None], int)
self.__test_equal(ret['X.us'] , [109321, 991111, None], int)
self.__test_equal(ret['X.weekday'] , [1, 1, None], int)
self.__test_equal(ret['X.isoweekday'] , [2, 2, None], int)
self.__test_equal(ret['X.tmweekday'] , [2, 2, None], int)
def test_datetime_lambda(self):
data = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111,
tzinfo=GMT(1))]
g=SArray(data)
gstr=g.apply(lambda x:str(x))
self.__test_equal(gstr, [str(x) for x in g], str)
gident=g.apply(lambda x:x)
self.__test_equal(gident, list(g), dt.datetime)
def test_datetime_to_str(self):
sa = SArray(self.datetime_data)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None],str)
sa = SArray([None,None,None],dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[None,None,None],str)
sa = SArray(dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[],str)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.datetime_to_str)
sa = SArray()
self.assertRaises(TypeError,sa.datetime_to_str)
def test_str_to_datetime(self):
sa_string = SArray(['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None])
sa_datetime_back = sa_string.str_to_datetime()
expected = self.datetime_data
self.__test_equal(sa_datetime_back,expected,dt.datetime)
sa_string = SArray([None,None,None],str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[None,None,None],dt.datetime)
sa_string = SArray(dtype=str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[],dt.datetime)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.str_to_datetime)
sa = SArray()
self.assertRaises(TypeError,sa.str_to_datetime)
# hour without leading zero
sa = SArray(['10/30/2014 9:01'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M')
expected = [dt.datetime(2014, 10, 30, 9, 1)]
self.__test_equal(sa,expected,dt.datetime)
# without delimiters
sa = SArray(['10302014 0901', '10302014 2001'])
sa = sa.str_to_datetime('%m%d%Y %H%M')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 20, 1)]
self.__test_equal(sa,expected,dt.datetime)
# another without delimiter test
sa = SArray(['20110623T191001'])
sa = sa.str_to_datetime("%Y%m%dT%H%M%S%F%q")
        expected = [dt.datetime(2011, 6, 23, 19, 10, 1)]
self.__test_equal(sa,expected,dt.datetime)
# am pm
sa = SArray(['10/30/2014 9:01am', '10/30/2014 9:01pm'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%p')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
sa = SArray(['10/30/2014 9:01AM', '10/30/2014 9:01PM'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%P')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
# failure 13pm
sa = SArray(['10/30/2014 13:01pm'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %H:%M%p')
# failure hour 13 when %l should only have up to hour 12
sa = SArray(['10/30/2014 13:01'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %l:%M')
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %L:%M')
sa = SArray(['2013-05-07T10:04:10',
'1902-10-21T10:34:10UTC+05:45'])
expected = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(5.75))]
self.__test_equal(sa.str_to_datetime() ,expected,dt.datetime)
def test_apply_with_partial(self):
sa = SArray([1, 2, 3, 4, 5])
def concat_fn(character, number):
return '%s%d' % (character, number)
my_partial_fn = functools.partial(concat_fn, 'x')
sa_transformed = sa.apply(my_partial_fn)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sa = SArray([1, 2, 3, 4, 5])
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, number):
return '%s%d' % (self.character, number)
concatenator = Concatenator('x')
sa_transformed = sa.apply(concatenator)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_argmax_argmin(self):
sa = SArray([1,4,-1,10,3,5,8])
index = [sa.argmax(),sa.argmin()]
expected = [3,2]
self.assertEqual(index,expected)
sa = SArray([1,4.3,-1.4,0,3,5.6,8.9])
index = [sa.argmax(),sa.argmin()]
expected = [6,2]
self.assertEqual(index,expected)
#empty case
sa = SArray([])
index = [sa.argmax(),sa.argmin()]
expected = [None,None]
self.assertEqual(index,expected)
# non-numeric type
sa = SArray(["434","43"])
with self.assertRaises(TypeError):
sa.argmax()
with self.assertRaises(TypeError):
sa.argmin()
def test_apply_with_recursion(self):
sa = SArray(range(1000))
sastr = sa.astype(str)
rets = sa.apply(lambda x:sastr[x])
self.assertEqual(list(rets), list(sastr))
def test_save_sarray(self):
        '''Saving a lazily evaluated SArray should not materialize it to the target folder.
        '''
data = SArray(range(1000))
data = data[data > 50]
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
        print(data)
| bsd-3-clause |
berkeley-stat159/project-zeta | code/utils/subject_class.py | 3 | 3903 | from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import os
import re
import nibabel as nib
class subject(object):
"""try to organize data based on subject ex sub001"""
# deal with path problem
def __init__(self, sub):
# subject info ex: 'sub001'
self.sub_id = sub
# deal with path problem
# root_path = "."
# pattern = re.compile(r'[/\\]')
# cwd = os.getcwd()
# check_path = pattern.split(cwd)
# if "project" not in check_path[-1]:
# root_path = ".."
# use another way to deal with path problem:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..", "..")
# check if the input is valid
valid_sub = ["sub001", "sub002", "sub003", "sub004", "sub005", "sub006"]
assert self.sub_id in valid_sub, "%s is not a valid input" % self.sub_id
# BOLD folder for this subject
# sub_BOLD_path = os.path.join(root_path, "data", "ds105", "%s" % self.sub_id, "BOLD")
# BOLD folder for this subject (preprocessed)
sub_BOLD_path = os.path.join(base_path, "data", "ds105_new", "%s" % self.sub_id, "model", "model001")
# anatomy folder for this subject
sub_anatomy_file = os.path.join(base_path, "data", "ds105_old", "%s" % self.sub_id, "anatomy","highres001_brain.nii.gz")
# runfile_list: ['task001_run001', 'task001_run002'......]
runfile_list = ['task001_run' + i + '.txt' for i in
['001', '002', '003', '004', '005', '006', '007', '008', '009', '010', '011', '012']]
runlist = ['run' + i for i in
['001', '002', '003', '004', '005', '006', '007', '008', '009', '010', '011', '012']]
# deal with sub005 which has only 11 run results
if self.sub_id == "sub005":
runfile_list = runfile_list[:-1]
runlist = runlist[:-1]
# TR info:
self.TR = 2.5
# load high resolution brain structure for this subject
self.brain_img = nib.load(sub_anatomy_file)
# load all bold image file for this subject
self.run_img_result = {}
for i in runlist:
# img_file_path = os.path.join(sub_BOLD_path, "task001_%s.feat" % i, "bold.nii.gz")
img_file_path = os.path.join(sub_BOLD_path, "task001_%s.feat" % i, "filtered_func_data_mni.nii.gz")
self.run_img_result[self.sub_id + '_' + i] = nib.load(img_file_path)
# ordered run keys:
        self.run_keys = sorted(self.run_img_result.keys())
# shape of the BOLD data:
self.BOLD_shape = self.run_img_result[self.run_keys[1]].shape
# conditions setting: which condition is for which category
condition_key_path = os.path.join(base_path, "data", "ds105_old", "models", "model001", "condition_key.txt")
condition_key_file = open(condition_key_path)
condition_list = condition_key_file.readlines()
condition = re.compile(r'(cond\d+) (\w+)')
result = {}
for item in condition_list:
for match in condition.finditer(item):
result[match.group(1)] = match.group(2)
self.condition_key = result
# condition files for each objects for each run
sub_condition_path = os.path.join(base_path, "data", "ds105_old", "%s" % self.sub_id, "model", "model001",
"onsets")
self.conditions = {}
for i in runfile_list:
for j in self.condition_key.keys():
self.conditions[i[8:14] + '-' + self.condition_key[j]] = os.path.join(sub_condition_path, i[:-4],
j + '.txt')
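# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes the ds105 directory layout referenced above is present
# on disk; the subject id and the printed attributes below are just examples.
if __name__ == "__main__":
    s = subject("sub001")
    print("available runs:", s.run_keys)
    # each entry of run_img_result is a nibabel image object
    first_run = s.run_img_result[s.run_keys[0]]
    print("BOLD shape of first run:", first_run.shape)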
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/io/formats/style/test_align.py | 3 | 13336 | import pytest
from pandas import DataFrame
pytest.importorskip("jinja2")
def bar_grad(a=None, b=None, c=None, d=None):
"""Used in multiple tests to simplify formatting of expected result"""
ret = [("width", "10em"), ("height", "80%")]
if all(x is None for x in [a, b, c, d]):
return ret
return ret + [
(
"background",
f"linear-gradient(90deg,{','.join(x for x in [a, b, c, d] if x)})",
)
]
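# Added for illustration (the test name below is new, not part of the original
# suite): a quick sanity check of the helper above, showing that a two-stop
# call yields the base CSS pairs plus one linear-gradient background entry.
def test_bar_grad_helper_example():
    result = bar_grad("#d65f5f 50.0%", " transparent 50.0%")
    assert result == [
        ("width", "10em"),
        ("height", "80%"),
        ("background", "linear-gradient(90deg,#d65f5f 50.0%, transparent 50.0%)"),
    ]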
class TestStylerBarAlign:
def test_bar_align_left(self):
df = DataFrame({"A": [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(2, 0): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
}
assert result == expected
result = df.style.bar(color="red", width=50)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad("red 25.0%", " transparent 25.0%"),
(2, 0): bar_grad("red 50.0%", " transparent 50.0%"),
}
assert result == expected
df["C"] = ["a"] * len(df)
result = df.style.bar(color="red", width=50)._compute().ctx
assert result == expected
df["C"] = df["C"].astype("category")
result = df.style.bar(color="red", width=50)._compute().ctx
assert result == expected
def test_bar_align_left_0points(self):
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {
(0, 0): bar_grad(),
(0, 1): bar_grad(),
(0, 2): bar_grad(),
(1, 0): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(1, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(1, 2): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(2, 0): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
(2, 1): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
(2, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
}
assert result == expected
result = df.style.bar(axis=1)._compute().ctx
expected = {
(0, 0): bar_grad(),
(0, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(0, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
(1, 0): bar_grad(),
(1, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(1, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
(2, 0): bar_grad(),
(2, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"),
(2, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"),
}
assert result == expected
def test_bar_align_mid_pos_and_neg(self):
df = DataFrame({"A": [-10, 0, 20, 90]})
result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx
expected = {
(0, 0): bar_grad(
"#d65f5f 10.0%",
" transparent 10.0%",
),
(1, 0): bar_grad(),
(2, 0): bar_grad(
" transparent 10.0%",
" #5fba7d 10.0%",
" #5fba7d 30.0%",
" transparent 30.0%",
),
(3, 0): bar_grad(
" transparent 10.0%",
" #5fba7d 10.0%",
" #5fba7d 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_all_pos(self):
df = DataFrame({"A": [10, 20, 50, 100]})
result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx
expected = {
(0, 0): bar_grad(
"#5fba7d 10.0%",
" transparent 10.0%",
),
(1, 0): bar_grad(
"#5fba7d 20.0%",
" transparent 20.0%",
),
(2, 0): bar_grad(
"#5fba7d 50.0%",
" transparent 50.0%",
),
(3, 0): bar_grad(
"#5fba7d 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_all_neg(self):
df = DataFrame({"A": [-100, -60, -30, -20]})
result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx
expected = {
(0, 0): bar_grad(
"#d65f5f 100.0%",
" transparent 100.0%",
),
(1, 0): bar_grad(
" transparent 40.0%",
" #d65f5f 40.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
(2, 0): bar_grad(
" transparent 70.0%",
" #d65f5f 70.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
(3, 0): bar_grad(
" transparent 80.0%",
" #d65f5f 80.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_zero_pos_and_neg(self):
# See https://github.com/pandas-dev/pandas/pull/14757
df = DataFrame({"A": [-10, 0, 20, 90]})
result = (
df.style.bar(align="zero", color=["#d65f5f", "#5fba7d"], width=90)
._compute()
.ctx
)
expected = {
(0, 0): bar_grad(
" transparent 40.0%",
" #d65f5f 40.0%",
" #d65f5f 45.0%",
" transparent 45.0%",
),
(1, 0): bar_grad(),
(2, 0): bar_grad(
" transparent 45.0%",
" #5fba7d 45.0%",
" #5fba7d 55.0%",
" transparent 55.0%",
),
(3, 0): bar_grad(
" transparent 45.0%",
" #5fba7d 45.0%",
" #5fba7d 90.0%",
" transparent 90.0%",
),
}
assert result == expected
def test_bar_align_left_axis_none(self):
df = DataFrame({"A": [0, 1], "B": [2, 4]})
result = df.style.bar(axis=None)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
"#d65f5f 25.0%",
" transparent 25.0%",
),
(0, 1): bar_grad(
"#d65f5f 50.0%",
" transparent 50.0%",
),
(1, 1): bar_grad(
"#d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_zero_axis_none(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="zero", axis=None)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 50.0%",
" #d65f5f 50.0%",
" #d65f5f 62.5%",
" transparent 62.5%",
),
(0, 1): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 50.0%",
" transparent 50.0%",
),
(1, 1): bar_grad(
" transparent 50.0%",
" #d65f5f 50.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_axis_none(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="mid", axis=None)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 33.3%",
" #d65f5f 33.3%",
" #d65f5f 50.0%",
" transparent 50.0%",
),
(0, 1): bar_grad(
"#d65f5f 33.3%",
" transparent 33.3%",
),
(1, 1): bar_grad(
" transparent 33.3%",
" #d65f5f 33.3%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_vmin(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="mid", axis=None, vmin=-6)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 60.0%",
" #d65f5f 60.0%",
" #d65f5f 70.0%",
" transparent 70.0%",
),
(0, 1): bar_grad(
" transparent 40.0%",
" #d65f5f 40.0%",
" #d65f5f 60.0%",
" transparent 60.0%",
),
(1, 1): bar_grad(
" transparent 60.0%",
" #d65f5f 60.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_vmax(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="mid", axis=None, vmax=8)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 20.0%",
" #d65f5f 20.0%",
" #d65f5f 30.0%",
" transparent 30.0%",
),
(0, 1): bar_grad(
"#d65f5f 20.0%",
" transparent 20.0%",
),
(1, 1): bar_grad(
" transparent 20.0%",
" #d65f5f 20.0%",
" #d65f5f 60.0%",
" transparent 60.0%",
),
}
assert result == expected
def test_bar_align_mid_vmin_vmax_wide(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="mid", axis=None, vmin=-3, vmax=7)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 30.0%",
" #d65f5f 30.0%",
" #d65f5f 40.0%",
" transparent 40.0%",
),
(0, 1): bar_grad(
" transparent 10.0%",
" #d65f5f 10.0%",
" #d65f5f 30.0%",
" transparent 30.0%",
),
(1, 1): bar_grad(
" transparent 30.0%",
" #d65f5f 30.0%",
" #d65f5f 70.0%",
" transparent 70.0%",
),
}
assert result == expected
def test_bar_align_mid_vmin_vmax_clipping(self):
df = DataFrame({"A": [0, 1], "B": [-2, 4]})
result = df.style.bar(align="mid", axis=None, vmin=-1, vmax=3)._compute().ctx
expected = {
(0, 0): bar_grad(),
(1, 0): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 50.0%",
" transparent 50.0%",
),
(0, 1): bar_grad("#d65f5f 25.0%", " transparent 25.0%"),
(1, 1): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_mid_nans(self):
df = DataFrame({"A": [1, None], "B": [-1, 3]})
result = df.style.bar(align="mid", axis=None)._compute().ctx
expected = {
(0, 0): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 50.0%",
" transparent 50.0%",
),
(0, 1): bar_grad("#d65f5f 25.0%", " transparent 25.0%"),
(1, 1): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_align_zero_nans(self):
df = DataFrame({"A": [1, None], "B": [-1, 2]})
result = df.style.bar(align="zero", axis=None)._compute().ctx
expected = {
(0, 0): bar_grad(
" transparent 50.0%",
" #d65f5f 50.0%",
" #d65f5f 75.0%",
" transparent 75.0%",
),
(0, 1): bar_grad(
" transparent 25.0%",
" #d65f5f 25.0%",
" #d65f5f 50.0%",
" transparent 50.0%",
),
(1, 1): bar_grad(
" transparent 50.0%",
" #d65f5f 50.0%",
" #d65f5f 100.0%",
" transparent 100.0%",
),
}
assert result == expected
def test_bar_bad_align_raises(self):
df = DataFrame({"A": [-100, -60, -30, -20]})
msg = "`align` must be one of {'left', 'zero',' mid'}"
with pytest.raises(ValueError, match=msg):
df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"])
| bsd-3-clause |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
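# Illustrative usage (comment only, not part of the original module): pylab
# calls this function once at startup to bind the backend entry points,
# roughly as follows.
#
#     new_figure_manager, draw_if_interactive, show = pylab_setup()
#     manager = new_figure_manager(1)   # figure manager for figure number 1
#     draw_if_interactive()             # no-op for non-interactive backends
#     show()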
| agpl-3.0 |
wlamond/scikit-learn | sklearn/model_selection/tests/test_validation.py | 7 | 42247 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_linear)
# test with callable
svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
score_callable = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_callable)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so the f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
# Following test case was designed this way to verify the code
# changes made in pull request: #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_cv_splits_consistency():
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=100, random_state=0)
scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split` is
    # called separately for each parameter setting (which would exhaust it),
    # the following should produce identical results for param settings 1 and
    # 2, as both have the same C value.
assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
2))
scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits, shuffle=True))
# For scores2, compare the 1st and 2nd parameter's scores
    # (since the C value for the first two param settings is 0.1, they must be
    # consistent unless the train/test folds differ between the param settings)
assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
2))
scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits))
# OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
# Test alternative representations of y
predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y1)
predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y2)
predictions_ystr = cross_val_predict(est, X, y.astype('str'),
method=method, cv=kfold)
assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
est = GridSearchCV(LogisticRegression(random_state=42),
{'C': [0.1, 1]},
cv=2)
check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
expected_predictions_ = func(X[test])
# To avoid 2 dimensional indexing
exp_pred_test = np.zeros((len(test), classes))
        if method == 'decision_function' and len(est.classes_) == 2:
exp_pred_test[:, est.classes_[-1]] = expected_predictions_
else:
exp_pred_test[:, est.classes_] = expected_predictions_
expected_predictions[test] = exp_pred_test
return expected_predictions
def test_cross_val_predict_class_subset():
X = np.arange(8).reshape(4, 2)
y = np.array([0, 0, 1, 2])
classes = 3
kfold3 = KFold(n_splits=3)
kfold4 = KFold(n_splits=4)
le = LabelEncoder()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
# Test with n_splits=3
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
# Runs a naive loop (should be same as cross_val_predict):
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Test with n_splits=4
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold4)
expected_predictions = get_expected_predictions(X, y, kfold4, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Testing unordered labels
y = [1, 1, -4, 6]
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
y = le.fit_transform(y)
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
def test_permutation_test_score_pandas():
# check permutation_test_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
iris = load_iris()
X, y = iris.data, iris.target
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
permutation_test_score(clf, X_df, y_ser)
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/ensemble/forest.py | 1 | 78779 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
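# Illustrative sketch (editor addition): a minimal check of how the two
# helpers above relate.  For a fixed random_state, every sample index is
# either drawn by _generate_sample_indices (possibly several times) or
# reported as out-of-bag by _generate_unsampled_indices, never both.
# The helper name is hypothetical and only meant as documentation.
def _example_oob_complement(random_state=0, n_samples=10):
    drawn = _generate_sample_indices(random_state, n_samples)
    oob = _generate_unsampled_indices(random_state, n_samples)
    assert set(drawn).isdisjoint(oob)
    assert set(drawn) | set(oob) == set(range(n_samples))
    return drawn, oob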
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
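# Illustrative sketch (editor addition): the bootstrap in _parallel_build_trees
# is applied through sample weights rather than by materialising the resampled
# data; each sample's weight is multiplied by the number of times it was drawn,
# so never-drawn samples keep weight zero.  Hypothetical helper name.
def _example_bootstrap_weights(random_state=0, n_samples=8):
    indices = _generate_sample_indices(random_state, n_samples)
    curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
    curr_sample_weight *= np.bincount(indices, minlength=n_samples)
    # One unit of weight per bootstrap draw, so the total weight is unchanged.
    assert curr_sample_weight.sum() == n_samples
    return curr_sample_weight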
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # indexing with [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
# This is a utility function for joblib's Parallel. It can't go locally in
# ForestClassifier or ForestRegressor, because joblib complains that it cannot
# pickle it when placed there.
def accumulate_prediction(predict, X, out):
prediction = predict(X, check_input=False)
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
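# Illustrative sketch (editor addition): accumulate_prediction is normally
# invoked through joblib's threading backend, but its contract is easy to see
# serially.  Summing each tree's probabilities into a shared buffer and then
# dividing by the number of trees reproduces the forest's predict_proba.
# Hypothetical helper name; X is cast to float32 because that is what the
# forest code hands to the individual trees when check_input=False.
def _example_accumulate_prediction():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    X = X.astype(np.float32)
    forest = RandomForestClassifier(n_estimators=5, random_state=0).fit(X, y)
    out = [np.zeros((X.shape[0], forest.n_classes_), dtype=np.float64)]
    for tree in forest.estimators_:
        accumulate_prediction(tree.predict_proba, X, out)
    averaged = out[0] / len(forest.estimators_)
    assert np.allclose(averaged, forest.predict_proba(X))
    return averaged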
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)]
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict_proba, X, all_proba)
for e in self.estimators_)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
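# Illustrative sketch (editor addition): ForestClassifier.predict implements
# soft voting, i.e. the predicted class is the argmax of the probabilities
# averaged over the trees, exactly as the predict docstring above states.
# Hypothetical helper name, illustration only.
def _example_soft_voting():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    proba = clf.predict_proba(X)
    voted = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
    assert np.array_equal(voted, clf.predict(X))
    return voted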
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict, X, [y_hat])
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
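# Illustrative sketch (editor addition): with bootstrap=True and
# oob_score=True, _set_oob_score above averages, for every training sample,
# the predictions of the trees that never saw it and scores them with R^2.
# Hypothetical helper name; the dataset is synthetic.
def _example_oob_regression():
    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor
    X, y = make_regression(n_samples=200, n_features=4, random_state=0)
    reg = RandomForestRegressor(n_estimators=30, oob_score=True,
                                bootstrap=True, random_state=0).fit(X, y)
    # One out-of-bag prediction per training sample for single-output y.
    assert reg.oob_prediction_.shape == (X.shape[0],)
    return reg.oob_score_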
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>>
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = RandomForestClassifier(max_depth=2, random_state=0)
>>> clf.fit(X, y)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=2, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(clf.feature_importances_)
[ 0.17287856 0.80608704 0.01884792 0.00218648]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
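# Illustrative sketch (editor addition): the warm_start parameter documented
# above grows an existing forest instead of refitting it from scratch.
# Raising n_estimators between calls to fit only adds the missing trees.
# Hypothetical helper name, illustration only.
def _example_warm_start():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    clf = RandomForestClassifier(n_estimators=5, warm_start=True,
                                 random_state=0).fit(X, y)
    first_five = list(clf.estimators_)
    clf.set_params(n_estimators=10)
    clf.fit(X, y)
    # The original five trees are kept and five new ones are appended.
    assert clf.estimators_[:5] == first_five
    assert len(clf.estimators_) == 10
    return clf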
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Examples
--------
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>>
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = RandomForestRegressor(max_depth=2, random_state=0)
>>> regr.fit(X, y)
RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=2,
max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(regr.feature_importances_)
[ 0.17339552 0.81594114 0. 0.01066333]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-2.50699856]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
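# Illustrative sketch (editor addition): unlike RandomForestClassifier, the
# ExtraTreesClassifier docstring above carries no usage doctest, so here is a
# minimal, non-authoritative example.  Note that bootstrap defaults to False,
# so every tree sees the full training set and only the candidate features
# and split thresholds are randomised.  Hypothetical helper name.
def _example_extra_trees():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import ExtraTreesClassifier
    X, y = make_classification(n_samples=300, n_features=8, n_informative=4,
                               random_state=0)
    clf = ExtraTreesClassifier(n_estimators=20, random_state=0).fit(X, y)
    return clf.score(X, y), clf.feature_importances_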
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
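# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module and never executed here). It assumes only the public scikit-learn
# API documented above: fit_transform returns a sparse one-hot encoding of
# the leaf each sample reaches in each tree, with at most
# n_estimators * max_leaf_nodes output columns.
#
#     import numpy as np
#     from sklearn.ensemble import RandomTreesEmbedding
#
#     X = np.random.RandomState(0).rand(100, 4)
#     hasher = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
#     X_sparse = hasher.fit_transform(X)      # sparse CSR, shape (100, n_out)
# ---------------------------------------------------------------------------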
| bsd-3-clause |
BursonLab/Silica-Coding-Project | Misc Parts/Silicon and Oxygen from Centers using Delaunay.py | 1 | 17703 | import math
import numpy
import matplotlib.pyplot as plt
# -*- coding: utf-8 -*-
"""
Created on Wed May 31 15:27:40 2017
@author: Kristen
"""
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 09:40:34 2017
@author: Kristen
"""
def distance(position1, position2):
""" Finds the distance between two atoms. """
return math.sqrt(math.pow(position1[0] - position2[0], 2) +
math.pow(position1[1] - position2[1], 2) +
math.pow(position1[2] - position2[2], 2))
def dists(positions, dist):
""" Finds if a triplet could have an Si atom between them """
# if there were not enough close to make a triplet, return none
if len(positions) < 3:
return[""]
# if there is a triplet and they are close enough to have a Si,
# return the triplet, else return blank
if len(positions) == 3:
if distance(positions[1], positions[2]) <= dist:
return positions
else:
return[""]
numbers = []
    # if there are more than 2 close enough to have a Si between them, find the
    # one that does not fit given the other two
for i in range(len(positions)):
numbers.append(0)
for i in range(1, len(positions) - 1):
for j in range(1, len(positions) - i):
# if two positions are not close enough, add a counter to both.
# If they are close enough, remove a counter from both
if distance(positions[i], positions[i + j]) > dist:
numbers[i] += 1
numbers[i + j] += 1
else:
numbers[i] -= 1
numbers[i + j] -= 1
# remove the one with the most counters
del positions[numbers.index(max(numbers))]
# if these still are not close enough to have a triplet between them,
# return none. If they are close enough, return the new triplet
if distance(positions[1], positions[2]) <= dist:
return positions
else:
return[""]
def find_four(opositions, far):
""" Finds four membered rings and returns a list of lists of their
locations """
rings = [[]]
remov = []
# for each oxygen
for i in range(len(opositions)):
rings.append([""])
rings[i] = [opositions[i]]
# for each oxygen with an x position higher than the current
for j in range(1, len(opositions) - i):
            # if the x position difference is less than the possible distance
            # between two oxygen atoms (variable inclusion radius)
if abs(opositions[i][0] - opositions[i + j][0]) <= far:
# if the distance between the two oxygens is less than the
# characteristic distance(variable inclusion radius)
if distance(opositions[i], opositions[i + j]) <= far:
rings[i].append(opositions[i + j])
rem = 0
if len(rings[i]) < 4:
rem = 1
elif len(rings[i]) > 4:
while len(rings[i]) != 4:
distances = []
for k in range(len(rings[i])):
tot_len = 0
for l in range(1, len(rings[i]) - k):
tot_len += distance(rings[i][k], rings[i][k + l])
distances.append(tot_len)
del rings[i][distances.index(max(distances))]
if len(rings[i]) == 4:
distances = []
for n in range(len(rings[i]) - 1):
for m in range(1, len(rings[i]) - n):
distances.append(distance(rings[i][n], rings[i][n + m]))
for n in range(2):
del distances[distances.index(max(distances))]
for n in range(4):
for m in range(1, len(distances) - n):
if abs(distances[n] - distances[n + m]) > .03:
rem = 1
if rem == 1:
remov.insert(0, i)
for n in range(len(remov)):
del rings[remov[n]]
return rings
def triarea(p1, p2, p3):
""" Finds the area of a triangle. """
a = distance(p1, p2)
b = distance(p2, p3)
c = distance(p1, p3)
s = (a + b + c) / 2
return math.sqrt(s * (s - a) * (s - b) * (s - c))
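# Worked example (added for illustration only): for the 3-4-5 right triangle
# triarea([0, 0, 0], [3, 0, 0], [3, 4, 0]), Heron's formula gives s = 6 and
# area sqrt(6 * 3 * 2 * 1) = 6.0.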
def ringarea(corners):
n = len(corners)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return float(area)
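# Worked example (added for illustration only): for the unit square
# ringarea([[0, 0], [1, 0], [1, 1], [0, 1]]) the shoelace sum is
# |0 + 1 + 1 + 0| / 2 = 1.0 (only the x and y components are used).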
def rem4(rings, si):
""" Finds if the silicon atom is within a four membered ring. """
deletes = []
for i in range(len(rings)):
triangles = 0
distances = []
locations = []
for n in range(len(rings[i]) - 1):
for m in range(1, len(rings[i]) - n):
distances.append(distance(rings[i][n], rings[i][n + m]))
locations.append([n, n + m])
locations.append(len(rings[i]))
for n in range(2):
del locations[distances.index(max(distances))]
del distances[distances.index(max(distances))]
for n in range(len(locations)):
triangles += triarea(rings[i][locations[n][0]],
rings[i][locations[n][1]], si)
if ringarea(rings[i]) == triangles:
            return "n"
    return "y"
# finds the position of a Si given a triplet of oxygen
def si_finder(opositions):
# characteristic distance
dist = 1.6 * math.pow(10, - 1)
    # sets up the translation to happen around a basepoint (the first point in
# the positions)
trans = [[0, 0, 0], [opositions[1][0] - opositions[0][0],
opositions[1][1] - opositions[0][1],
opositions[1][2] - opositions[0][2]],
[opositions[2][0] - opositions[0][0], opositions[2][1] -
opositions[0][1], opositions[2][2] - opositions[0][2]]]
# finds vector perpendicular to the plane of the three points
v = numpy.matrix([numpy.linalg.det([[trans[1][1], trans[2][1]],
[trans[1][2], trans[2][2]]]),
numpy.linalg.det([[trans[1][0], trans[2][0]],
[trans[1][2], trans[2][2]]]),
numpy.linalg.det([[trans[1][0], trans[2][0]],
[trans[1][1], trans[2][1]]])])
# sets up first rotation matrix about the x axis
theta = math.atan2(v.item(1), v.item(2))
xmatr = numpy.matrix([[1, 0, 0], [0, math.cos(theta), - math.sin(theta)],
[0, math.sin(theta), math.cos(theta)]])
trans1 = numpy.matrix(trans)
rot1 = numpy.matrix.dot(trans1, xmatr)
v1 = numpy.matrix.dot(v, xmatr)
# second rotation matrix about the y axis
rho = math.atan2(v1.item(0), v1.item(2))
ymatr = numpy.matrix([[math.cos(rho), 0, math.sin(rho)], [0, 1, 0],
[-math.sin(rho), 0, math.cos(rho)]])
rot2 = numpy.matrix.dot(rot1, ymatr)
# should be in the xy plane now. Have to rotate such that two points
# are on the x axis
alph = math.atan2(rot2.item(4), rot2.item(3))
bet = math.atan2(rot2.item(7), rot2.item(6))
r1 = math.sqrt(math.pow(rot2.item(3), 2) + math.pow(rot2.item(4), 2))
r2 = math.sqrt(math.pow(rot2.item(6), 2) + math.pow(rot2.item(7), 2))
rot3 = numpy.matrix([[rot2.item(0), rot2.item(1), rot2.item(2)],
[r1, 0, 0],
[r2 * math.cos(bet - alph),
r2 * math.sin(bet - alph), 0]])
x = r1 / 2
y = r2 * (1 - math.cos(bet - alph)) / (2.0 * math.sin(bet - alph))
z = math.sqrt(abs(math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2)))
si_pos = numpy.matrix([x, y, z])
    # rotate back to original position
init = math.atan2(si_pos.item(1), si_pos.item(0))
r = math.sqrt(math.pow(si_pos.item(0), 2) + math.pow(si_pos.item(1), 2))
x = r * math.cos(init + alph)
y = r * math.sin(init + alph)
si_pos = numpy.matrix([x, y, z])
# undo second rotation matrix
iymatr = numpy.linalg.inv(ymatr)
si_pos = numpy.matrix.dot(si_pos, iymatr)
# undo first rotation matrix
ixmatr = numpy.linalg.inv(xmatr)
si_pos = numpy.matrix.dot(si_pos, ixmatr)
# translate back so there is no point at the origin
si_pos = [si_pos.item(0) + opositions[0][0],
si_pos.item(1) + opositions[0][1],
si_pos.item(2) + opositions[0][2]]
return si_pos
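# Hedged usage sketch (added for illustration; the triplet below is made up and
# this helper is never called by the script). Three oxygen positions roughly
# 0.26 nm apart should yield a Si position roughly equidistant (about 0.16 nm,
# the Si-O bond length hard-coded above) from the three oxygens.
def _si_finder_demo():
    triplet = [[0.0, 0.0, 0.0],
               [0.26, 0.0, 0.0],
               [0.13, 0.225, 0.0]]
    return si_finder(triplet)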
# locates all possible triplets
def o_locator(opositions):
dist = 1.6 * math.pow(10, - 1)
# assumed oxygens are ordered by increasing x values
# used to collect all the found oxygens close enough to have a single Si
# between them
found = [[""]]
# for each oxygen
for i in range(len(opositions)):
found[i] = [opositions[i]]
# for each oxygen with an x position higher than the current
for j in range(1, len(opositions) - i):
            # if the x position difference is less than the possible distance
            # between two oxygen atoms (variable inclusion radius)
if abs(opositions[i][0] - opositions[i + j][0]) <= \
3.45 * math.pow(10, - 1):
# if the distance between the two oxygens is less than the
# characteristic distance(variable inclusion radius)
if distance(opositions[i], opositions[i + j]) <= \
3.45 * math.pow(10, - 1):
found[i].append(opositions[i + j])
found.append([""])
# removes last appended empty list
del found[len(found) - 1]
# remove all those too far apart using dist function (variable inclusion
# radius)
for n in range(len(found)):
found[n] = dists(found[n], .345)
    # create an array for positions to remove
remov = []
# for all atoms with found oxygens
for n in range(len(found)):
# add empties to a list for removal
if found[n] == [""]:
remov.insert(0, n)
# remove those in the remove list
for m in range(len(remov)):
del found[remov[m]]
# return the list of those oxygen that have a possible Si between them
return found
def locate_si(positions, dist):
# assumes presorted positions by x position
doubles = []
# finds all within the given radius and adds those doubles to the list
for i in range(len(positions)):
for j in range(1, len(positions) - i):
if distance(positions[i], positions[i + j]) <= dist:
doubles.append([positions[i], positions[i + j]])
return doubles
def find_o(positions, dist):
opositions = []
for i in range(len(positions)):
# center at origin
pos1 = [0, 0, 0]
pos2 = [positions[i][1][0] - positions[i][0][0], positions[i][1][1] -
positions[i][0][1], positions[i][1][2] - positions[i][0][2]]
# rotate until both points are in the xy plane
theta = numpy.arctan2(pos2[1], pos2[0])
phi = numpy.arctan2(pos2[2], pos2[0])
newx = math.sqrt(math.pow(pos2[0], 2) + math.pow(pos2[2], 2))
newy = newx * math.tan(theta)
        # find the Si position (midpoint between origin and pos2 in the x-y
        # plane, with z making up the difference)
x = newx / 2
y = newy / 2
if math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2) > 0:
z = math.sqrt(math.pow(dist, 2) - math.pow(x, 2) - math.pow(y, 2))
else:
z = 0
# current angle above x - y plane
r = math.sqrt(math.pow(x, 2) + math.pow(y, 2) + math.pow(z, 2))
alph = math.asin(z / r)
# when rotated back, it will rotate to angle phi+alph
opos = [r * math.cos(theta) * math.cos(alph + phi),
r * math.sin(theta) * math.cos(alph + phi),
r * math.sin(alph + phi)]
# append to the list
opositions.append([opos[0] + positions[i][0][0],
opos[1] + positions[i][0][1],
opos[2] + positions[i][0][2]])
return opositions
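# Hedged sketch (added for illustration; not called anywhere in this script and
# the coordinates are invented): given two Si centres 0.30 nm apart, locate_si
# pairs them and find_o places the bridging oxygen at their midpoint, lifted
# off the Si-Si axis so it sits at the requested Si-O distance:
#
#     pairs = locate_si([[0.0, 0.0, 0.0], [0.3, 0.0, 0.0]], 0.345)
#     opos = find_o(pairs, 0.16)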
def main():
# convert data in file into floats and append to a position list
with open("CP_120106_008_curated.txt") as f:
content = f.readline()
string = ""
locations = []
for i in range(len(content)):
if content[i] == " ":
locations.append(float(string))
string = ""
else:
string += content[i]
locations.append(float(string))
positions = [[""]]
for i in range(len(locations)):
if i % 3 == 0:
            positions[i // 3] = [locations[i]]
positions.append("")
else:
            positions[i // 3].append(locations[i])
del positions[len(positions) - 1]
# sort positions for the double finder function
positions = sorted(positions)
# Create a Graph of the Input Data
xypts = []
for i in range(len(positions)):
xypts.append([positions[i][0], positions[i][1]])
# print(xypts)
points = numpy.array(xypts)
from scipy.spatial import Delaunay
tri = Delaunay(points)
print(len(tri.simplices))
# print(tri.simplices)
o_locations = []
for i in range(len(tri.simplices)):
midptx1 = 0.50 * (points[tri.simplices][i][0][0] +
points[tri.simplices][i][1][0])
midpty1 = 0.50 * (points[tri.simplices][i][0][1] +
points[tri.simplices][i][1][1])
o_locations.append([midptx1, midpty1, 0])
midptx2 = (points[tri.simplices][i][1][0] +
points[tri.simplices][i][2][0]) / 2.00
midpty2 = (points[tri.simplices][i][1][1] +
points[tri.simplices][i][2][1]) / 2.00
o_locations.append([midptx2, midpty2, 0])
midptx3 = (points[tri.simplices][i][2][0] +
points[tri.simplices][i][0][0]) / 2.00
midpty3 = (points[tri.simplices][i][2][1] +
points[tri.simplices][i][0][1]) / 2.00
o_locations.append([midptx3, midpty3, 0])
print(len(o_locations))
    o_locations.sort()
# print(o_locations)
remove = []
for i in range(len(o_locations) - 1):
if o_locations[i] == o_locations[i + 1]:
remove.append(i + 1)
remove.sort(reverse=True)
print(len(o_locations))
# print(remove)
for i in range(len(remove)):
del (o_locations[remove[i]])
print(len(o_locations))
xOpos = []
yOpos = []
for i in range(len(o_locations)):
xOpos.append(o_locations[i][0])
yOpos.append(o_locations[i][1])
plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
plt.plot(points[:, 0], points[:, 1], 'o')
plt.scatter(xOpos, yOpos, label='Oxygen Positions', color='r')
plt.xlabel('x (nm)')
plt.ylabel('y (nm)')
plt.title('Center Positions')
plt.legend()
plt.show()
# write O positions to an out file
out = open("OfC Positions 120106_008 Python Output.txt", "w")
out.write(str(o_locations))
    out.write("\n\n")
positions = o_locations
# find triplets
triples = o_locator(positions)
# print(triples)
# find Si positions
si_locations = []
for j in range(len(triples)):
si_locations.append(si_finder(triples[j]))
    # rings = find_four(positions, .35)
delete = []
for i in range(len(delete)):
del si_locations[delete[i]]
# Plot
xSipos = []
ySipos = []
for i in range(len(si_locations)):
xSipos.append(si_locations[i][0])
ySipos.append(si_locations[i][1])
xOpos = []
yOpos = []
for i in range(len(o_locations)):
xOpos.append(o_locations[i][0])
yOpos.append(o_locations[i][1])
import matplotlib.pyplot as plt
plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
plt.plot(points[:, 0], points[:, 1], 'o', color='k')
plt.scatter(xOpos, yOpos, label='Oxygen Positions', color='r')
plt.scatter(xSipos, ySipos, label='Silicon Positions', color='g')
plt.xlabel('x (nm)')
plt.ylabel('y (nm)')
plt.title('Center Positions')
plt.legend()
plt.show()
import matplotlib.pyplot as plt
# plt.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
# plt.plot(points[:, 0], points[:, 1], 'o', color = 'k')
plt.scatter(xOpos, yOpos, label='Oxygen Positions', color='r')
plt.scatter(xSipos, ySipos, label='Silicon Positions', color='g')
plt.xlabel('x (nm)')
plt.ylabel('y (nm)')
plt.title('Center Positions')
plt.legend()
plt.show()
# == == == == == == == == == == == == == == == == == == == == == == == == == ==
# # write Si positions to an outfile
# out = open("Si Positions Output 170404.txt", "w")
# out.write(str(si_locations))
# out.write("\n")
#
# == == == == == == == == == == == == == == == == == == == == == == == == == ==
# write O positions to an out file
out = open("OfC Positions 120106_008 Python Output.txt", "w")
out.write(str(o_locations))
    out.write("\n\n")
if __name__ == "__main__":
main()
| apache-2.0 |
EricSB/nupic | examples/sp/sp_tutorial.py | 5 | 12502 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Spatial Pooler.
The following program has the purpose of presenting some
basic properties of the Spatial Pooler. It reproduces Figs.
5, 7 and 9 from this paper: http://arxiv.org/abs/1505.02142
To learn more about the Spatial Pooler have a look at BAMI:
http://numenta.com/biological-and-machine-intelligence/
or at its class reference in the NuPIC documentation:
http://numenta.org/docs/nupic/classnupic_1_1research_1_1spatial__pooler_1_1_spatial_pooler.html
The purpose of the Spatial Pooler is to create a sparse representation
of its inputs in such a way that similar inputs will be mapped to similar
sparse representations. Thus, the Spatial Pooler should exhibit some resilience
to noise in its input.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.research.spatial_pooler import SpatialPooler as SP
def percentOverlap(x1, x2, size):
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@param size (int) length of binary vectors
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
minX1X2 = min(nonZeroX1, nonZeroX2)
percentOverlap = 0
if minX1X2 > 0:
percentOverlap = float(np.dot(x1, x2))/float(minX1X2)
return percentOverlap
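# Worked example (added for illustration, not part of the original tutorial):
# for x1 = [1, 0, 1, 0] and x2 = [1, 1, 0, 0] the dot product is 1 and each
# vector has 2 active bits, so percentOverlap(x1, x2, 4) returns 1 / 2 = 0.5.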
def corruptVector(vector, noiseLevel):
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1
def resetVector(x1, x2):
"""
Copies the contents of vector x1 into vector x2.
@param x1 (array) binary vector to be copied
@param x2 (array) binary vector where x1 is copied
"""
size = len(x1)
for i in range(size):
x2[i] = x1[i]
random.seed(1)
uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()
inputArray = np.zeros(inputSize, dtype=uintType)
for i in range(inputSize):
inputArray[i] = random.randrange(2)
activeCols = np.zeros(columnNumber, dtype=uintType)
sp = SP(inputDimensions,
columnDimensions,
potentialRadius = int(0.5*inputSize),
numActiveColumnsPerInhArea = int(0.02*columnNumber),
globalInhibition = True,
seed = 1,
synPermActiveInc = 0.01,
synPermInactiveDec = 0.008
)
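# For the concrete sizes used in this tutorial (added note): inputSize = 1000
# and columnNumber = 2048, so the call above wires each column to a random half
# of the input (potentialRadius = 500) and keeps roughly 2% of the columns
# active per inhibition area (numActiveColumnsPerInhArea = int(0.02 * 2048) = 40).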
# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec.
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
# ie. columns with the largest overlap score.
# In this first part, we will create a histogram showing the overlap scores
# of the Spatial Pooler (SP) after feeding it with a random binary
# input. As well, the histogram will show the scores of those columns
# that are chosen to build the sparse representation of the input.
sp.compute(inputArray, False, activeCols)
overlaps = sp.getOverlaps()
activeColsScores = []
for i in activeCols.nonzero():
activeColsScores.append(overlaps[i])
print ""
print "---------------------------------"
print "Figure 1 shows a histogram of the overlap scores"
print "from all the columns in the spatial pooler, as well as the"
print "overlap scores of those columns that were selected to build a"
print "sparse representation of the input (shown in green)."
print "The SP chooses 2% of the columns with the largest overlap score"
print "to make such sparse representation."
print "---------------------------------"
print ""
bins = np.linspace(min(overlaps), max(overlaps), 28)
plt.hist(overlaps, bins, alpha=0.5, label='All cols')
plt.hist(activeColsScores, bins, alpha=0.5, label='Active cols')
plt.legend(loc='upper right')
plt.xlabel("Overlap scores")
plt.ylabel("Frequency")
plt.title("Figure 1: Column overlap of a SP with random input.")
plt.savefig("figure_1")
plt.close()
# Part 2a:
# -------
# The input overlap between two binary vectors is defined as their dot product. In order
# to normalize this value we divide by the minimum number of active inputs
# (in either vector). This means we are considering the sparser vector as reference.
# Two identical binary vectors will have an input overlap of 1, whereas two completely
# different vectors (one is the logical NOT of the other) will yield an overlap of 0.
# In this section we will see how the input overlap of two binary vectors decreases as we
# add noise to one of them.
inputX1 = np.zeros(inputSize, dtype=uintType)
inputX2 = np.zeros(inputSize, dtype=uintType)
outputX1 = np.zeros(columnNumber, dtype=uintType)
outputX2 = np.zeros(columnNumber, dtype=uintType)
for i in range(inputSize):
inputX1[i] = random.randrange(2)
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputX1, inputX2)
corruptVector(inputX2, noiseLevel)
x.append(noiseLevel)
y.append(percentOverlap(inputX1, inputX2, inputSize))
print ""
print "---------------------------------"
print "Figure 2 shows the input overlap between 2 identical binary"
print "vectors as a function of the noise applied to one of them."
print "0 noise level means that the vector remains the same, whereas"
print "1 means that the vector is the logical negation of the original"
print "vector."
print "The relationship between overlap and noise level is practically"
print "linear and monotonically decreasing."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Noise level")
plt.ylabel("Input overlap")
plt.title("Figure 2: Input overlap between 2 identical vectors as a function of noise level.")
plt.savefig("figure_2")
plt.close()
# Part 2b:
# -------
# The output overlap between two binary input vectors is the overlap of the
# columns that become active once they are fed to the SP. In this part we
# turn learning off, and observe the output of the SP as we input two binary
# input vectors with varying level of noise.
# Starting from two identical vectors (that yield the same active columns)
# we would expect that as we add noise to one of them their output overlap
# decreases.
# In this part we will show how the output overlap behaves as a function of the
# input overlap between two vectors.
# Even with an untrained spatial pooler, we see some noise resilience.
# Note that due to the non-linear properties of high dimensional SDRs, overlaps
# greater than 10 bits, or 25% in this example, are considered significant.
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputX1, inputX2)
corruptVector(inputX2, noiseLevel)
sp.compute(inputX1, False, outputX1)
sp.compute(inputX2, False, outputX2)
x.append(percentOverlap(inputX1, inputX2, inputSize))
y.append(percentOverlap(outputX1, outputX2, columnNumber))
print ""
print "---------------------------------"
print "Figure 3 shows the output overlap between two sparse representations"
print "as a function of their input overlap. Starting from two identical binary vectors"
print "(which yield the same active columns) we add noise to one of them,"
print "feed it to the SP, and estimate the output overlap between the two"
print "representations in terms of the common active columns between them."
print "As expected, as the input overlap decrease, so does the output overlap."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 3: Output overlap as a function of input overlap in a SP without training")
plt.savefig("figure_3")
plt.close()
# Part 3:
# -------
# After training, a SP can become less sensitive to noise. For this purpose, we train the SP by
# turning learning on, and by exposing it to a variety of random binary vectors.
# We will expose the SP to a repetition of input patterns in order to make it learn and distinguish
# them once learning is over. This will result in robustness to noise in the inputs.
# In this section we will reproduce the plot in the last section after the SP has learned a series
# of inputs. Here we will see how the SP exhibits increased resilience to noise after learning.
# We will present 10 random vectors to the SP, and repeat this 30 times.
# Later you can try changing the number of times we do this to see how it changes the last plot.
# Then, you could also modify the number of examples to see how the SP behaves.
# Is there a relationship between the number of examples and the number of times that
# we expose them to the SP?
numExamples = 10
inputVectors = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumns = np.zeros((numExamples, columnNumber), dtype=uintType)
for i in range(numExamples):
for j in range(inputSize):
inputVectors[i][j] = random.randrange(2)
# This is the number of times that we will present the input vectors to the SP
epochs = 30
for _ in range(epochs):
for i in range(numExamples):
#Feed the examples to the SP
sp.compute(inputVectors[i][:], True, outputColumns[i][:])
inputVectorsCorrupted = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumnsCorrupted = np.zeros((numExamples, columnNumber), dtype=uintType)
x = []
y = []
# We will repeat the experiment in the last section for only one input vector
# in the set of input vectors
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputVectors[0][:], inputVectorsCorrupted[0][:])
corruptVector(inputVectorsCorrupted[0][:], noiseLevel)
sp.compute(inputVectors[0][:], False, outputColumns[0][:])
sp.compute(inputVectorsCorrupted[0][:], False, outputColumnsCorrupted[0][:])
x.append(percentOverlap(inputVectors[0][:], inputVectorsCorrupted[0][:], inputSize))
y.append(percentOverlap(outputColumns[0][:], outputColumnsCorrupted[0][:], columnNumber))
print ""
print "---------------------------------"
print "How robust is the SP to noise after learning?"
print "Figure 4 again shows the output overlap between two binary vectors as a function"
print "of their input overlap. After training, the SP exhibits more robustness to noise"
print "in its input, resulting in an almost-sigmoid curve. This implies that even if a"
print "previous input is presented again with a certain amount of noise its sparse"
print "representation still resembles its original."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 4: Output overlap as a function of input overlap in a SP after training")
plt.savefig("figure_4")
plt.close()
print ""
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print " All images generated by this script will be saved"
print " in your current working directory."
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print ""
| agpl-3.0 |
UCL-CS35/incdb-poc | venv/share/doc/dipy/examples/piesno.py | 11 | 3970 | """
=============================
Noise estimation using PIESNO
=============================
Often, one is interested in estimating the noise in the diffusion signal. One
of the methods to do this is the Probabilistic Identification and Estimation of
Noise (PIESNO) framework [Koay2009]_. Using this method, one can detect the
standard deviation of the noise from diffusion-weighted imaging (DWI). PIESNO
also works with multiple channel DWI datasets that are acquired from N array
coils for both SENSE and GRAPPA reconstructions.
The PIESNO method works in two steps:
1) First, it finds voxels that are most likely background voxels. Intuitively,
these voxels have very similar diffusion-weighted intensities (up to some noise)
in the fourth dimension of the DWI dataset. White matter, gray matter or CSF
voxels have diffusion intensities that vary quite a lot across different
directions.
2) From these estimated background voxels and the input number of coils N,
PIESNO finds what sigma each Gaussian from each of the N coils would have
generated the observed Rician (N=1) or non-central Chi (N>1) distributed noise
profile in the DWI datasets.
PIESNO makes an important assumption: the Gaussian noise standard deviation is
assumed to be uniform. The noise is uniform across multiple slice locations or
across multiple images of the same location.
For the full details, please refer to the original paper.
In this example, we will demonstrate the use of PIESNO with a 3-shell data-set.
We start by importing necessary modules and functions and loading the data:
"""
import nibabel as nib
import numpy as np
from dipy.denoise.noise_estimate import piesno
from dipy.data import fetch_sherbrooke_3shell, read_sherbrooke_3shell
fetch_sherbrooke_3shell()
img, gtab = read_sherbrooke_3shell()
data = img.get_data()
"""
Now that we have fetched a dataset, we must call PIESNO with the right number
of coils used to acquire this dataset. It is also important to know what
was the parallel reconstruction algorithm used. Here, the data comes from a
GRAPPA reconstruction, was acquired with a 12-elements head coil available on
the Tim Trio Siemens, for which the 12 coil elements are combined into 4 groups
of 3 coil elements each. The signal is therefore received through 4 distinct
groups of receiver channels, yielding N = 4. Had we used a GE acquisition, we
would have used N=1 even if multiple channel coils are used because GE uses a
SENSE reconstruction, which has a Rician noise nature and thus N is always 1.
"""
sigma, mask = piesno(data, N=4, return_mask=True)
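# Illustrative variant (added comment, not executed, to avoid repeating the
# estimation): for a SENSE-type reconstruction such as a GE acquisition the
# noise is Rician and the same call would simply use N=1:
#
#     sigma, mask = piesno(data, N=1, return_mask=True)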
axial = data[:, :, data.shape[2] / 2, 0].T
axial_piesno = mask[:, :, data.shape[2] / 2].T
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2)
ax[0].imshow(axial, cmap='gray', origin='lower')
ax[0].set_title('Axial slice of the b=0 data')
ax[1].imshow(axial_piesno, cmap='gray', origin='lower')
ax[1].set_title('Background voxels from the data')
for a in ax:
a.set_axis_off()
plt.savefig('piesno.png', bbox_inches='tight')
"""
.. figure:: piesno.png
:align: center
**Showing the mid axial slice of the b=0 image (left) and estimated
background voxels (right) used to estimate the noise standard deviation**.
"""
nib.save(nib.Nifti1Image(mask, img.get_affine(), img.get_header()),
'mask_piesno.nii.gz')
print('The noise standard deviation is sigma= ', sigma)
print('The std of the background is =', np.std(data[mask[...,None].astype(np.bool)]))
"""
Here, we obtained a noise standard deviation of 7.26. For comparison, a simple
standard deviation of all voxels in the estimated mask (as done in the previous
example :ref:`example_snr_in_cc`) gives a value of 6.1.
"""
"""
.. [Koay2009] Koay C.G., E. Ozarslan, C. Pierpaoli. Probabilistic
Identification and Estimation of Noise (PIESNO): A
self-consistent approach and its applications in MRI.
JMR, 199(1):94-103, 2009.
.. include:: ../links_names.inc
"""
| bsd-2-clause |
bhargav/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
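# (Added note: the small perturbation above is there to avoid an exactly
# singular data matrix -- e.g. pixel columns that are identically zero -- which
# would otherwise make the LDA fit fail; it barely changes the data.)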
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
lplp8899/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit convertions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
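# Worked examples (added for illustration): rad_of_deg(180) == math.pi,
# rpm_of_rps(math.pi) == 30.0 and m_of_inch(1) == 0.0254.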
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
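# Hedged round-trip sketch (added for illustration; never called). It builds one
# packet with the same layout the parser above expects -- a 4-byte tick
# timestamp, a 2-byte payload length, then per message a 1-byte length and a
# 1-byte id, plus a 2-byte trailer -- writes it to a file and parses it back.
# Field meanings are inferred from read_binary_log, not from fms_link.c, and
# the file name is arbitrary.
def _read_binary_log_demo(filename="/tmp/phoenix_demo_log.bin"):
    msg_payload = struct.pack("H", 1234)
    msg = struct.pack("BB", len(msg_payload), 42) + msg_payload
    packet = struct.pack("IH", 1024, len(msg)) + msg + "\x00\x00"
    with open(filename, "wb") as f:
        f.write(packet)
    return read_binary_log(filename)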
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
try: i = msg_names.index(m.name)
except: pass
finally: ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
dpinney/omf | omf/scratch/faultLabeledMeterData/code/classificationPlotting.py | 1 | 4962 | import csv, time, itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix
PLOTTING_FEATURE_NUM_1 = 0
PLOTTING_FEATURE_NUM_2 = 1
VISUALIZE_DATA_ONLY = False
TRAIN_FRACTION = 0.9
INPUT_FILE = '../data/dataABEC-1mo.csv'
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
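# Illustrative call (added as a comment so the script's own figures are not
# changed; the 2x2 matrix is made-up data, purely to show the expected input):
#
#     toy_cm = np.array([[8, 2],
#                        [1, 9]])
#     plt.figure()
#     plot_confusion_matrix(toy_cm, classes=['normal', 'fault'], normalize=True)
#     plt.show()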
# load data
x, y, header, colorList, uniqueLabels = [], [], [], [], []
colorNum = -1
lastLabel = ''
with open( INPUT_FILE,'r' ) as dataFile:
reader = csv.reader(dataFile, delimiter=',')
for row in reader:
if 'meterID' in row:
header = row
else:
datapoint = []
label = row[-1]
# restart counter when we transition to new label and update colorNum
if label != lastLabel:
count = 0
colorNum += 1
uniqueLabels.append(label)
datapoint.append(count)
colorList.append(colorNum)
# populate datapoint
for index,data in enumerate(row):
# timestamp and meterID and label are not part of the datapoint
# convert everything to float
if (index>1) and (index != (len(row)-1)):
datapoint.append(float(data))
x.append( datapoint )
y.append( label )
count += 1
lastLabel = label
newX = np.array(x)
x = np.array(x)
y = np.array(y)
ordering = np.argsort(x[:,0])
x = x[ordering]
y = y[ordering]
numPoints = x.shape[0]
print(numPoints)
split = int(TRAIN_FRACTION * numPoints)
if VISUALIZE_DATA_ONLY:
pass;
else: # classify
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB()
]
# normalize data and split into train/test
x = StandardScaler().fit_transform(x)
xTrain, xTest, yTrain, yTest = x[:split,:], x[split:,:], y[:split], y[split:]
print(xTrain.shape)
print(yTrain.shape)
print(xTest.shape)
print(yTest.shape)
# iterate over classifiers
for name, clf in zip(names, classifiers):
start = time.time()
clf.fit(xTrain, yTrain)
endTrain = time.time()
yPredicted = clf.predict(xTest)
endTest = time.time()
totalTrain = endTrain-start
totalTest = endTest-endTrain
print('Train: {} seconds, Predict: {} seconds'.format(totalTrain, totalTest))
confMat = confusion_matrix(yTest, yPredicted, labels=uniqueLabels)
score = clf.score(xTest, yTest)
plt.figure()
plot_confusion_matrix(confMat, uniqueLabels, normalize=True, title=name)
print(name,score)
print('')
# plot data
header = ['timepoint'] + header[2:-1]
plt.figure()
plt.scatter(newX[:,PLOTTING_FEATURE_NUM_1], newX[:,PLOTTING_FEATURE_NUM_2], c=colorList)
plt.colorbar()
plt.tight_layout()
plt.ylabel(header[PLOTTING_FEATURE_NUM_2])
plt.xlabel(header[PLOTTING_FEATURE_NUM_1])
plt.show() | gpl-2.0 |
LeeKamentsky/CellProfiler | cellprofiler/modules/convertobjectstoimage.py | 2 | 8638 | '''<b>Convert Objects To Image </b> converts objects you have identified into an image.
<hr>
This module allows you to take previously identified objects and convert
them into an image according to a colormap you select, which can then be saved
with the <b>SaveImages</b> modules.
<p>If you would like to save your objects but do not need a colormap,
you can bypass this module and use the <b>SaveImages</b> module directly
by specifying "Objects" as the type of image to save.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import numpy as np
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
import cellprofiler.cpimage as cpi
import cellprofiler.preferences as cpprefs
DEFAULT_COLORMAP = "Default"
COLORCUBE = "colorcube"
LINES = "lines"
WHITE = "white"
COLORMAPS = ["Default", "autumn", "bone", COLORCUBE, "cool", "copper",
"flag", "gray", "hot", "hsv", "jet", LINES,"pink", "prism",
"spring", "summer", WHITE, "winter" ]
IM_COLOR = "Color"
IM_BINARY = "Binary (black & white)"
IM_GRAYSCALE = "Grayscale"
IM_UINT16 = "uint16"
IM_ALL = [IM_COLOR, IM_BINARY, IM_GRAYSCALE, IM_UINT16]
class ConvertObjectsToImage(cpm.CPModule):
module_name = "ConvertObjectsToImage"
category = "Object Processing"
variable_revision_number = 1
def create_settings(self):
self.object_name = cps.ObjectNameSubscriber(
"Select the input objects",cps.NONE,doc="""
Choose the name of the objects you want to convert to an image.""")
self.image_name = cps.ImageNameProvider(
"Name the output image", "CellImage",doc="""
Enter the name of the resulting image.""")
self.image_mode = cps.Choice(
"Select the color format",
IM_ALL,doc="""
Select which colors the resulting image should use. You have the following
options:
<ul>
<li><i>%(IM_COLOR)s:</i> Allows you to choose a colormap that will
produce jumbled colors for your objects. </li>
<li><i>%(IM_BINARY)s:</i> All object pixels will be assigned 1 and all
background pixels will be assigned 0, creating a binary image.</li>
<li><i>%(IM_GRAYSCALE)s:</i> Gives each object
a graylevel pixel intensity value corresponding to its number (also
called label), so it usually results in objects on the left side of the
image being very dark, progressing toward white on the right side of
the image. </li>
<li><i>%(IM_UINT16)s:</i> Assigns each object a different number,
from 1 to 65535 (the numbers that you can put in
a 16-bit integer) and numbers all pixels in each
object with the object's number. This format can
be written out as a .mat or .tiff file if you
want to process the label matrix image using
another program.</li>
</ul>
You can choose <i>Color</i> with a <i>Gray</i> colormap to produce
jumbled gray objects."""%globals())
self.colormap = cps.Colormap(
"Select the colormap",doc="""
<i>(Used only if Color output image selected)</i><br>
Choose the colormap to be used, which affects how the objects are colored.
You can look up your default colormap under <i>File > Preferences</i>.""")
def settings(self):
return [self.object_name, self.image_name, self.image_mode,
self.colormap]
def visible_settings(self):
if self.image_mode == IM_COLOR:
return [self.object_name, self.image_name, self.image_mode,
self.colormap]
else:
return [self.object_name, self.image_name, self.image_mode]
def run(self, workspace):
objects = workspace.object_set.get_objects(self.object_name.value)
alpha = np.zeros(objects.shape)
if self.image_mode == IM_BINARY:
pixel_data = np.zeros(objects.shape, bool)
elif self.image_mode == IM_GRAYSCALE:
pixel_data = np.zeros(objects.shape)
elif self.image_mode == IM_UINT16:
pixel_data = np.zeros(objects.shape, np.int32)
else:
pixel_data = np.zeros((objects.shape[0], objects.shape[1], 3))
convert = True
for labels, indices in objects.get_labels():
mask = labels != 0
if np.all(~ mask):
continue
if self.image_mode == IM_BINARY:
pixel_data[mask] = True
alpha[mask] = 1
elif self.image_mode == IM_GRAYSCALE:
pixel_data[mask] = labels[mask].astype(float) / np.max(labels)
alpha[mask] = 1
elif self.image_mode == IM_COLOR:
import matplotlib.cm
from cellprofiler.gui.cpfigure_tools import renumber_labels_for_display
if self.colormap.value == DEFAULT_COLORMAP:
cm_name = cpprefs.get_default_colormap()
elif self.colormap.value == COLORCUBE:
# Colorcube missing from matplotlib
cm_name = "gist_rainbow"
elif self.colormap.value == LINES:
                # The "lines" colormap is missing from matplotlib and nothing
                # closely resembles it; it is a fairly plain palette anyway, hence
cm_name = "Pastel1"
elif self.colormap.value == WHITE:
# White missing from matplotlib, it's just a colormap
# of all completely white... not even different kinds of
# white. And, isn't white just a uniform sampling of
# frequencies from the spectrum?
cm_name = "Spectral"
else:
cm_name = self.colormap.value
cm = matplotlib.cm.get_cmap(cm_name)
mapper = matplotlib.cm.ScalarMappable(cmap=cm)
pixel_data[mask, :] += \
mapper.to_rgba(renumber_labels_for_display(labels))[mask, :3]
alpha[mask] += 1
elif self.image_mode == IM_UINT16:
pixel_data[mask] = labels[mask]
alpha[mask] = 1
convert = False
mask = alpha > 0
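        # alpha counts how many label planes touched each pixel (only color
        # mode can exceed 1); dividing by it below averages overlapping
        # objects' contributions instead of letting them saturate.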
if self.image_mode == IM_BINARY:
pass
elif self.image_mode == IM_COLOR:
pixel_data[mask, :] = pixel_data[mask, :] / alpha[mask][:, np.newaxis]
else:
pixel_data[mask] = pixel_data[mask] / alpha[mask]
image = cpi.Image(pixel_data, parent_image = objects.parent_image,
convert = convert)
workspace.image_set.add(self.image_name.value, image)
if self.show_window:
workspace.display_data.ijv = objects.ijv
workspace.display_data.pixel_data = pixel_data
def display(self, workspace, figure):
pixel_data = workspace.display_data.pixel_data
figure.set_subplots((2, 1))
figure.subplot_imshow_ijv(
0, 0, workspace.display_data.ijv,
shape = workspace.display_data.pixel_data.shape[:2],
title = "Original: %s"%self.object_name.value)
if self.image_mode == IM_BINARY:
figure.subplot_imshow_bw(1, 0, pixel_data,
self.image_name.value,
sharexy = figure.subplot(0, 0))
elif pixel_data.shape[1] == 2:
figure.subplot_imshow_grayscale(1, 0, pixel_data,
self.image_name.value,
sharexy = figure.subplot(0, 0))
else:
figure.subplot_imshow_grayscale(1, 0, pixel_data,
self.image_name.value,
sharexy = figure.subplot(0, 0))
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if variable_revision_number == 1 and from_matlab:
from_matlab = False
return setting_values, variable_revision_number, from_matlab
#
# Backwards compatibility
#
ConvertToImage = ConvertObjectsToImage
| gpl-2.0 |
shishaochen/TensorFlow-0.8-Win | tensorflow/contrib/learn/python/learn/estimators/base.py | 1 | 40162 | """Base estimator class."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import shutil
from six import string_types
import numpy as np
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.python.client import session
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import training as train
from tensorflow.contrib.layers import optimizers
from tensorflow.contrib.learn.python.learn import trainer
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_predict_data_feeder
from tensorflow.contrib.learn.python.learn.ops.dropout_ops import DROPOUTS
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
def _write_with_backup(filename, content):
if gfile.Exists(filename):
gfile.Rename(filename, filename + '.old', overwrite=True)
with gfile.Open(filename, 'w') as f:
f.write(content)
class TensorFlowEstimator(_sklearn.BaseEstimator):
"""Base class for all TensorFlow estimators.
Parameters:
model_fn: Model function, that takes input X, y tensors and outputs
prediction and loss tensors.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
    learning_rate: If this is a constant float value, no decay function is
      used. Alternatively, a customized decay function can be passed that
      accepts global_step as a parameter and returns a Tensor,
      e.g. an exponential decay function:
        def exp_decay(global_step):
          return tf.train.exponential_decay(
              learning_rate=0.1, global_step=global_step,
              decay_steps=2, decay_rate=0.001)
clip_gradients: Clip norm of the gradients to this value to stop
gradient explosion.
    class_weight: None or list of n_classes floats. Weight associated with
      classes for loss computation. If not given, all classes are assumed to
      have weight one.
    continue_training: when continue_training is True, the already
      initialized model will continue to be trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
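  Example, a minimal regression sketch (illustrative only; it assumes the
  models.linear_regression helper shipped with this version of contrib.learn):
    import numpy as np
    from tensorflow.contrib.learn.python import learn
    def linear_model(X, y):
      return learn.models.linear_regression(X, y)
    estimator = learn.TensorFlowEstimator(model_fn=linear_model, n_classes=0)
    estimator.fit(np.random.rand(100, 5), np.random.rand(100))
    predictions = estimator.predict(np.random.rand(10, 5))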
"""
def __init__(self, model_fn, n_classes, batch_size=32,
steps=200, optimizer="Adagrad",
learning_rate=0.1, clip_gradients=5.0, class_weight=None,
continue_training=False,
config=None, verbose=1):
self.model_fn = model_fn
self.n_classes = n_classes
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.optimizer = optimizer
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
self.continue_training = continue_training
self._initialized = False
self.class_weight = class_weight
self._config = config
self._output_dir = None
def _setup_training(self):
"""Sets up graph, model and trainer."""
# Create config if not given.
if self._config is None:
self._config = RunConfig(verbose=self.verbose)
# Create new graph.
self._graph = ops.Graph()
self._graph.add_to_collection("IS_TRAINING", True)
with self._graph.as_default():
random_seed.set_random_seed(self._config.tf_random_seed)
self._global_step = variables.Variable(
0, name="global_step", trainable=False)
# Setting up inputs and outputs.
self._inp, self._out = self._data_feeder.input_builder()
# If class weights are provided, add them to the graph.
# Different loss functions can use this tensor by name.
if self.class_weight:
self._class_weight_node = constant_op.constant(
self.class_weight, name='class_weight')
# Add histograms for X and y if they are floats.
if self._data_feeder.input_dtype in (np.float32, np.float64):
logging_ops.histogram_summary("X", self._inp)
if self._data_feeder.output_dtype in (np.float32, np.float64)\
and self._out is not None:
logging_ops.histogram_summary("y", self._out)
# Create model's graph.
self._model_predictions, self._model_loss = self.model_fn(
self._inp, self._out)
# Create trainer and augment graph with gradients and optimizer.
# Additionally creates initialization ops.
learning_rate = self.learning_rate
optimizer = self.optimizer
if callable(learning_rate):
learning_rate = learning_rate(self._global_step)
if callable(optimizer):
optimizer = optimizer(learning_rate)
self._train = optimizers.optimize_loss(self._model_loss, self._global_step,
learning_rate=learning_rate,
optimizer=optimizer, clip_gradients=self.clip_gradients)
# Update ops during training, e.g. batch_norm_ops
self._train = control_flow_ops.group(self._train, *ops.get_collection('update_ops'))
# Merge all summaries into single tensor.
self._summaries = logging_ops.merge_all_summaries()
# Get all initializers for all trainable variables.
self._initializers = variables.initialize_all_variables()
# Create model's saver capturing all the nodes created up until now.
self._saver = train.Saver(
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=self._config.keep_checkpoint_every_n_hours)
# Enable monitor to create validation data dict with appropriate tf placeholders
self._monitor.create_val_feed_dict(self._inp, self._out)
# Create session to run model with.
self._session = session.Session(self._config.tf_master, config=self._config.tf_config)
# Run parameter initializers.
self._session.run(self._initializers)
def _setup_summary_writer(self, logdir):
"""Sets up the summary writer to prepare for later optional visualization."""
self._output_dir = os.path.join(logdir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
self._summary_writer = train.SummaryWriter(self._output_dir, graph=self._session.graph)
def fit(self, X, y, monitor=None, logdir=None):
"""Builds a neural network model given provided `model_fn` and training
data X and y.
    Note: the first call constructs the graph and initializes variables;
    consecutive calls continue training the same model. This logic follows
    the partial_fit() interface in scikit-learn. To restart learning, create
    a new estimator.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
monitor: Monitor object to print training progress and invoke early stopping
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
# Sets up data feeder.
self._data_feeder = setup_train_data_feeder(X, y,
self.n_classes,
self.batch_size)
if monitor is None:
self._monitor = monitors.default_monitor(verbose=self.verbose)
else:
self._monitor = monitor
if not self.continue_training or not self._initialized:
# Sets up model and trainer.
self._setup_training()
self._initialized = True
else:
self._data_feeder.set_placeholders(self._inp, self._out)
# Sets up summary writer for later optional visualization.
    # _summary_writer cannot be set up in __init__ because it is not a
    # parameter of the model, so check here whether the attribute exists
    # and whether it is None (it may have been set up in a previous run).
    # It is initialized only if it was not set up before and a log dir
    # is provided.
if logdir:
if (not hasattr(self, "_summary_writer") or
(hasattr(self, "_summary_writer") and self._summary_writer is None)):
self._setup_summary_writer(logdir)
else:
self._summary_writer = None
# Attach monitor to this estimator.
self._monitor.set_estimator(self)
# Train model for given number of steps.
trainer.train(
self._session, self._train,
self._model_loss, self._global_step,
self._data_feeder.get_feed_dict_fn(),
steps=self.steps,
monitor=self._monitor,
summary_writer=self._summary_writer,
summaries=self._summaries,
feed_params_fn=self._data_feeder.get_feed_params)
return self
def partial_fit(self, X, y):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. It can be used to
    implement either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long
    time to converge and you want to split training into subparts.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(X, y)
def _predict(self, X, axis=-1, batch_size=None):
if not self._initialized:
raise _sklearn.NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
self._graph.add_to_collection("IS_TRAINING", False)
predict_data_feeder = setup_predict_data_feeder(
X, batch_size=batch_size)
preds = []
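    # Dropout is disabled at prediction time by feeding a keep probability of
    # 1.0 for every dropout placeholder registered in the DROPOUTS collection.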
dropouts = self._graph.get_collection(DROPOUTS)
feed_dict = {prob: 1.0 for prob in dropouts}
for data in predict_data_feeder:
feed_dict[self._inp] = data
predictions_for_batch = self._session.run(
self._model_predictions,
feed_dict)
if self.n_classes > 1 and axis != -1:
preds.append(predictions_for_batch.argmax(axis=axis))
else:
preds.append(predictions_for_batch)
return np.concatenate(preds, axis=0)
def predict(self, X, axis=1, batch_size=None):
"""Predict class or regression for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(X, axis=axis, batch_size=batch_size)
def predict_proba(self, X, batch_size=None):
"""Predict class probability of the input samples X.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size
member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(X, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
return self._graph.get_tensor_by_name(name)
def get_tensor_value(self, name):
"""Returns value of the tensor give by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return self._session.run(self.get_tensor(name))
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
with self._graph.as_default():
return [v.name for v in variables.all_variables()]
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if not self._initialized:
raise _sklearn.NotFittedError()
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
      raise ValueError('Path %s should be a directory to save '
'checkpoints and graph.' % path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
    # Save endpoints (names of the input, output, prediction and loss tensors).
endpoints = '%s\n%s\n%s\n%s' % (self._inp.name, self._out.name,
self._model_predictions.name,
self._model_loss.name)
_write_with_backup(os.path.join(path, 'endpoints'), endpoints)
# Save graph definition.
_write_with_backup(
os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))
# Save saver definition.
_write_with_backup(
os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))
# Save checkpoints.
self._saver.save(self._session,
os.path.join(path, 'model'),
global_step=self._global_step)
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
self._graph = ops.Graph()
with self._graph.as_default():
endpoints_filename = os.path.join(path, 'endpoints')
if not os.path.exists(endpoints_filename):
raise ValueError("Restore folder doesn't contain endpoints.")
with gfile.Open(endpoints_filename) as foutputs:
endpoints = foutputs.read().split('\n')
graph_filename = os.path.join(path, 'graph.pbtxt')
if not os.path.exists(graph_filename):
raise ValueError("Restore folder doesn't contain graph definition.")
with gfile.Open(graph_filename) as fgraph:
graph_def = graph_pb2.GraphDef()
text_format.Merge(fgraph.read(), graph_def)
(self._inp, self._out, self._model_predictions,
self._model_loss) = importer.import_graph_def(
graph_def, name='', return_elements=endpoints)
saver_filename = os.path.join(path, 'saver.pbtxt')
if not os.path.exists(saver_filename):
raise ValueError("Restore folder doesn't contain saver definition.")
with gfile.Open(saver_filename) as fsaver:
saver_def = train.SaverDef()
text_format.Merge(fsaver.read(), saver_def)
self._saver = train.Saver(saver_def=saver_def)
# Restore trainer
self._global_step = self._graph.get_tensor_by_name('global_step:0')
self._train = self._graph.get_operation_by_name('OptimizeLoss/train')
# Restore summaries.
self._summaries = self._graph.get_operation_by_name(
'MergeSummary/MergeSummary')
# Restore session.
if not isinstance(self._config, RunConfig):
self._config = RunConfig(verbose=self.verbose)
self._session = session.Session(self._config.tf_master,
config=self._config.tf_config)
checkpoint_path = train.latest_checkpoint(path)
if checkpoint_path is None:
raise ValueError(
          'Missing checkpoint files in %s. Please make sure that the '
          '"checkpoint" file listing the latest checkpoints exists and '
          'that the corresponding checkpoint files are present. '
          'If you have moved the folder, you need to manually update '
          'the paths in the "checkpoint" file.' % path)
self._saver.restore(self._session, checkpoint_path)
# Set to be initialized.
self._initialized = True
# pylint: disable=unused-argument
@classmethod
def restore(cls, path, config=None):
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
reconfigured.
Returns:
      Estimator, object of the subclass of TensorFlowEstimator.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
_config = config
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if isinstance(value, string_types) and not isinstance(value, str):
model_def[key] = str(value)
if key in reconfigurable_params:
new_value = locals()[key]
if new_value is not None:
model_def[key] = new_value
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
from tensorflow.contrib.learn.python.learn import estimators
# Estimator must be one of the defined estimators in the __init__ file.
estimator = getattr(estimators, class_name)(**model_def)
estimator._restore(path)
return estimator
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
"""TensorFlow Base Transformer class."""
def transform(self, X):
"""Transform X using trained transformer."""
return(super(TensorFlowBaseTransformer, self).predict(X, axis=1, batch_size=None))
def fit(self, X, y=None, monitor=None, logdir=None):
"""Fit a transformer."""
return(super(TensorFlowBaseTransformer, self).fit(X, y, monitor=None, logdir=None))
def fit_transform(self, X, y=None, monitor=None, logdir=None):
"""Fit transformer and transform X using trained transformer."""
return(self.fit(X, y, monitor=None, logdir=None).transform(X))
| apache-2.0 |
marionleborgne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
    'width' or 'height', the arrow size increases with the width and
    height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
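Example (an illustrative sketch; the grid and vector field are arbitrary)::
  import numpy as np
  import matplotlib.pyplot as plt
  X, Y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))
  U = np.cos(X)
  V = np.sin(Y)
  plt.quiver(X, Y, U, V, units='width')
  plt.show()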
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
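Example (an illustrative sketch; assumes Q is the return value of a prior
quiver() call)::
  Q = quiver(X, Y, U, V)
  qk = quiverkey(Q, 0.9, 0.95, 2, '2 m/s', labelpos='E',
                 coordinates='figure')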
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
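            # Heuristic default for the shaft width: proportional to the axes
            # span and inversely proportional to sn, which grows with the
            # number of arrows but is clamped to the range [8, 25].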
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
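        # Nudge each point by eps along (u, v) in data space, transform both
        # the original and nudged points to display space, and take the
        # direction of the difference; this is what makes angles='xy' follow
        # the axes' aspect ratio and scaling.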
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
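        # Walk one side of the arrow outline (indices 0-3) and back along the
        # other (2, 1, 0); the sign flips on Y below mirror the second half
        # across the shaft, giving a 7-point arrow outline.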
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
    parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
    analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
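Example (an illustrative sketch; the vector field shown is arbitrary)::
  import numpy as np
  import matplotlib.pyplot as plt
  x = np.linspace(-5, 5, 5)
  X, Y = np.meshgrid(x, x)
  U, V = 12 * X, 12 * Y
  plt.barbs(X, Y, U, V)
  plt.show()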
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
#Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces are necessary. *flag*
specifies the increment for a flag, *full* for a full barb, and *half*
for half a barb. *mag* should be the magnitude of a vector (i.e. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean indicating whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of booleans that makes it easy to tell
whether a barb is empty (too low to plot any barbs/flags).
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
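# Added sketch (not in the original source): with the default increments
# (half=5, full=10, flag=50), the magnitude of 65 mentioned in the module
# documentation above decomposes exactly as _find_tails computes it:
#
#   mag = 65      -> rounding keeps it at 65 (already a multiple of 5)
#   flags = floor(65 / 50) = 1,  remainder 15
#   barbs = floor(15 / 10) = 1,  remainder 5
#   half  = (5 >= 5) -> True,    empty -> False
#
# i.e. _find_tails(np.array([65.])) would return
# (array([1]), array([1]), array([ True]), array([False])).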
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft to top of a flag or full
barb
- *width*: width of a flag, twice the width of a full barb
- *emptybarb*: radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere).
This function returns a list of arrays of vertices, defining a polygon for
each of the wind barbs. These polygons have been rotated to properly
align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking, where 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
#The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
#Add vertices for each barb. These really are lines, but it works
#well to add 3 vertices that basically pull the polygon out and
#back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
#Rotate the barb according to the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
Set the offsets for the barb polygons. This saves the offsets passed in
and actually sets a version masked as appropriate for the existing U/V
data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
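#Added usage sketch (not part of the original module). Assuming the usual
#pyplot wrapper around this class is exposed as matplotlib.pyplot.barbs, a
#minimal wind-barb plot might look like:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x, y = np.meshgrid(np.arange(5), np.arange(5))
#   u = 10.0 * np.ones_like(x, dtype=float)   # eastward component
#   v = 25.0 * np.ones_like(y, dtype=float)   # northward component
#   plt.barbs(x, y, u, v, length=7, pivot='tip', barbcolor='k')
#   plt.show()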
| agpl-3.0 |
wangxianliang/facenet | tmp/visualize_vgg_model.py | 4 | 3543 | import numpy as np
from scipy import misc
import tensorflow as tf
from matplotlib import pyplot, image
import vggverydeep19
paintingStyleImage = image.imread("../data/schoolofathens.jpg")
pyplot.imshow(paintingStyleImage)
inputImage = image.imread("../data/grandcentral.jpg")
pyplot.imshow(inputImage)
outputWidth = 800
outputHeight = 600
# Beta constant
beta = 5
# Alpha constant
alpha = 100
# Noise ratio
noiseRatio = 0.6
nodes = vggverydeep19.load('../data/imagenet-vgg-verydeep-19.mat', (600, 800))
# Mean VGG-19 image
meanImage19 = np.array([103.939, 116.779, 123.68]).reshape((1,1,1,3)) #pylint: disable=no-member
# Squared-error loss of content between the two feature representations
def sqErrorLossContent(sess, modelGraph, layer):
p = sess.run(modelGraph[layer])
#pylint: disable=maybe-no-member
N = p.shape[3]
M = p.shape[1] * p.shape[2]
return (1 / (4 * N * M)) * tf.reduce_sum(tf.pow(modelGraph[layer] - sess.run(modelGraph[layer]), 2))
# Squared-error loss of style between the two feature representations
styleLayers = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2),
]
def sqErrorLossStyle(sess, modelGraph):
def intermediateCalc(x, y):
N = x.shape[3]
M = x.shape[1] * x.shape[2]
A = tf.matmul(tf.transpose(tf.reshape(x, (M, N))), tf.reshape(x, (M, N)))
G = tf.matmul(tf.transpose(tf.reshape(y, (M, N))), tf.reshape(y, (M, N)))
return (1 / (4 * N**2 * M**2)) * tf.reduce_sum(tf.pow(G - A, 2))
E = [intermediateCalc(sess.run(modelGraph[layerName]), modelGraph[layerName]) for layerName, _ in styleLayers]
W = [w for _, w in styleLayers]
return sum([W[layerNumber] * E[layerNumber] for layerNumber in range(len(styleLayers))])
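# Added note (a sketch of the math, not original code): intermediateCalc above
# reshapes each feature map to an (M x N) matrix F and forms the Gram matrices
# A = F_style^T F_style and G = F_generated^T F_generated; the per-layer style
# loss is then E_l = 1/(4 N^2 M^2) * sum((G - A)^2), and sqErrorLossStyle
# returns the weighted sum over styleLayers (weight 0.2 per layer here).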
session = tf.InteractiveSession()
# Addition of extra dimension to image
inputImage = np.reshape(inputImage, ((1,) + inputImage.shape))
inputImage = inputImage - meanImage19
# Display image
pyplot.imshow(inputImage[0])
# Addition of extra dimension to image
paintingStyleImage = np.reshape(paintingStyleImage, ((1,) + paintingStyleImage.shape))
paintingStyleImage = paintingStyleImage - meanImage19
# Display image
pyplot.imshow(paintingStyleImage[0])
imageNoise = np.random.uniform(-20, 20, (1, outputHeight, outputWidth, 3)).astype('float32')
pyplot.imshow(imageNoise[0])
mixedImage = imageNoise * noiseRatio + inputImage * (1 - noiseRatio)
pyplot.imshow(mixedImage[0])
session.run(tf.global_variables_initializer())
session.run(nodes['input'].assign(inputImage))
contentLoss = sqErrorLossContent(session, nodes, 'conv4_2')
session.run(nodes['input'].assign(paintingStyleImage))
styleLoss = sqErrorLossStyle(session, nodes)
totalLoss = beta * contentLoss + alpha * styleLoss
optimizer = tf.train.AdamOptimizer(2.0)
trainStep = optimizer.minimize(totalLoss)
session.run(tf.global_variables_initializer())
session.run(nodes['input'].assign(inputImage))
# Number of iterations to run.
iterations = 2000
session.run(tf.global_variables_initializer())
session.run(nodes['input'].assign(inputImage))
for iters in range(iterations):
session.run(trainStep)
if iters%50 == 0:
# Output every 50 iterations for animation
filename = 'output%d.png' % (iters)
im = session.run(nodes['input']) + meanImage19  # fetch the current optimized image
im = im[0]
im = np.clip(im, 0, 255).astype('uint8')
misc.imsave(filename, im)
im = session.run(nodes['input']) + meanImage19  # fetch the final optimized image
im = im[0]
im = np.clip(im, 0, 255).astype('uint8')
misc.imsave('finalImage.png', im)
| mit |
zonca/petsc4py | demo/bratu2d/bratu2d.py | 3 | 2355 | import sys, petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
# this user class is an application
# context for the nonlinear problem
# at hand; it contains some parametes
# and knows how to compute residuals
class Bratu2D:
def __init__(self, nx, ny, alpha, impl='python'):
self.nx = nx # x grid size
self.ny = ny # y grid size
self.alpha = alpha
if impl == 'python':
from bratu2dnpy import bratu2d
order = 'c'
elif impl == 'fortran':
from bratu2df90 import bratu2d
order = 'f'
else:
raise ValueError('invalid implementation')
self.compute = bratu2d
self.order = order
def evalFunction(self, snes, X, F):
nx, ny = self.nx, self.ny
alpha = self.alpha
order = self.order
x = X.getArray(readonly=1).reshape(nx, ny, order=order)
f = F.getArray(readonly=0).reshape(nx, ny, order=order)
self.compute(alpha, x, f)
# convenience access to
# PETSc options database
OptDB = PETSc.Options()
nx = OptDB.getInt('nx', 32)
ny = OptDB.getInt('ny', nx)
alpha = OptDB.getReal('alpha', 6.8)
impl = OptDB.getString('impl', 'python')
# create application context
# and PETSc nonlinear solver
appc = Bratu2D(nx, ny, alpha, impl)
snes = PETSc.SNES().create()
# register the function in charge of
# computing the nonlinear residual
f = PETSc.Vec().createSeq(nx*ny)
snes.setFunction(appc.evalFunction, f)
# configure the nonlinear solver
# to use a matrix-free Jacobian
snes.setUseMF(True)
snes.getKSP().setType('cg')
snes.setFromOptions()
# solve the nonlinear problem
b, x = None, f.duplicate()
x.set(0) # zero initial guess
snes.solve(b, x)
if OptDB.getBool('plot', True):
da = PETSc.DMDA().create([nx,ny])
u = da.createGlobalVec()
x.copy(u)
draw = PETSc.Viewer.DRAW()
OptDB['draw_pause'] = 1
draw(u)
if OptDB.getBool('plot_mpl', False):
try:
from matplotlib import pylab
except ImportError:
PETSc.Sys.Print("matplotlib not available")
else:
from numpy import mgrid
X, Y = mgrid[0:1:1j*nx,0:1:1j*ny]
Z = x[...].reshape(nx,ny)
pylab.figure()
pylab.contourf(X,Y,Z)
pylab.colorbar()
pylab.plot(X.ravel(),Y.ravel(),'.k')
pylab.axis('equal')
pylab.show()
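# Added usage sketch (an assumption-laden example, not part of the original
# demo): with petsc4py installed and the bratu2d kernels built, the options
# read through OptDB above can be set on the command line in the usual PETSc
# style, e.g.
#
#   python bratu2d.py -nx 64 -ny 64 -alpha 6.8 -impl python -snes_monitor
#
# and booleans such as -plot / -plot_mpl can be toggled with -plot 0 -plot_mpl 1.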
| bsd-2-clause |
gef756/seaborn | examples/elaborate_violinplot.py | 30 | 1055 | """
Violinplot from a wide-form dataset
===================================
_thumb: .6, .45
"""
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
# Load the example dataset of brain network correlations
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Pull out a specific subset of networks
used_networks = [1, 3, 4, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Compute the correlation matrix and average over networks
corr_df = df.corr().groupby(level="network").mean()
corr_df.index = corr_df.index.astype(int)
corr_df = corr_df.sort_index().T
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 6))
# Draw a violinplot with a narrower bandwidth than the default
sns.violinplot(data=corr_df, palette="Set3", bw=.2, cut=1, linewidth=1)
# Finalize the figure
ax.set(ylim=(-.7, 1.05))
sns.despine(left=True, bottom=True)
| bsd-3-clause |
victorbergelin/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
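#Added usage sketch (not part of the original module). Assuming the class is
#importable from sklearn.manifold as usual, a minimal embedding looks like:
#
#   import numpy as np
#   from sklearn.manifold import Isomap
#   X = np.random.RandomState(0).rand(100, 10)
#   X_2d = Isomap(n_neighbors=5, n_components=2).fit_transform(X)
#   print(X_2d.shape)   # (100, 2)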
| bsd-3-clause |
bhmm/bhmm | bhmm/_external/sklearn/utils.py | 3 | 12129 | '''
Created on 07.07.2015
@author: marscher
'''
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
import six
from inspect import getargspec
class NotFittedError(Exception):
pass
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
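# Added illustration (a sketch, not original code) of the intended call
# pattern inside an estimator method:
#
#   def predict(self, X):
#       check_is_fitted(self, "coef_")   # raises NotFittedError before fit()
#       ...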
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
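# Added illustration (a sketch, not original code): all three accepted inputs
# yield a usable RandomState,
#
#   rng = check_random_state(None)   # the global numpy RandomState singleton
#   rng = check_random_state(0)      # a fresh RandomState seeded with 0
#   rng = check_random_state(rng)    # an existing instance is passed through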
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
if sp.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array | lgpl-3.0 |
skoslowski/gnuradio | gr-digital/examples/snr_estimators.py | 3 | 5863 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
try:
import scipy
from scipy import stats
except ImportError:
print("Error: Program requires scipy (www.scipy.org).")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires Matplotlib (matplotlib.sourceforge.net).")
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal with several estimators and compare them.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
def online_skewness(data):
n = 0
mean = 0
M2 = 0
M3 = 0
for n in range(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * n
mean = mean + delta_n
M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
return scipy.sqrt(len(data))*M3 / scipy.power(M2, 3.0 / 2.0);
def snr_est_simple(signal):
s = scipy.mean(abs(signal)**2)
n = 2*scipy.var(abs(signal))
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(signal.real)
#y4 = stats.skew(abs(signal.real))
skw = y4*y4 / (y2*y2*y2);
s = y1*y1
n = 2*(y3 + skw*s)
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in range(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0 / (float(N-1.0)))*ssum
mavg = (1.0 / (float(N-1.0)))*msum
beta = savg / (mavg - savg)
snr_rat = ((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
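# Added sanity-check sketch (illustrative only, not part of the original
# example). The scaling mirrors main() below: unit-power BPSK plus complex
# noise of power 1/SNR, so each estimator should return roughly the true SNR:
#
#   N = 10000
#   bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#   noise = scipy.random.randn(N) + 1j*scipy.random.randn(N)
#   snr_true = 10.0**(10.0/10.0)            # 10 dB
#   rx = bits + noise / scipy.sqrt(2*snr_true)
#   print(snr_est_simple(rx))               # roughly (10 dB, 10)
#   print(snr_est_m2m4(rx))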
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=list(gr_estimators.keys()), default="simple",
help="Estimator type {0} [default=%default]".format(
list(gr_estimators.keys())))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits =2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
# 1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr / 10.0)
scale = scipy.sqrt(2*SNR)
yy = bits + n_cpx / scale
print("SNR: ", snr)
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx / scale)
snr0 = Sknown / Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(float(snr0dB))
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = blocks.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = channels.channel_model(1.0 / scale)
gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.plot(yy.real, yy.imag, 'o')
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
norheim/pextant | pextant/mesh/MeshVisualizer.py | 2 | 4269 | from bokeh.io import push_notebook, show, output_notebook
import matplotlib.pyplot as plt
import numpy as np
from bokeh.plotting import figure
from pextant.lib.geoshapely import GeoPolygon
class TriExpandViz(object):
def __init__(self, env_model, start_point, end_point, counter_interval=10):
self.mesh = env_model.dataset.mesh
self.env_model = env_model
#self.points = GeoPolygon([start_point, end_point])
self.y, self.x = self.mesh.vertices[:, :2].transpose()
self.zfaces = self.mesh.triangles_center[:, 2]
self.counter = 0
self.counter_interval = counter_interval
def draw(self, x = (), y=()):
#px,py = self.points.to(self.env_model.ROW_COL)
plt.tripcolor(self.x, self.y, self.mesh.faces, facecolors=self.zfaces, edgecolors='k')
if len(x) != 0:
plt.plot(x, y)
plt.axis('equal')
plt.show()
def addcount(self):
self.counter += 1
if self.counter % self.counter_interval == 0:
print self.counter
if self.counter % self.counter_interval == 0:
self.draw()
def add(self, state, cost):
self.zfaces[state] = cost
class ExpandViz(object):
def __init__(self, env_model, counter_interval=1000):
self.env_model = env_model
self.expandedgrid = np.zeros((env_model.y_size, env_model.x_size))
self.counter = 0
self.counter_interval = counter_interval
self.expanded = []
#cmap = 'viridis'
def draw(self):
expanded = np.array(self.expanded).transpose()
gp_expanded = GeoPolygon(self.env_model.ROW_COL,*expanded)
upper_left, lower_right = gp_expanded.geoEnvelope()
upper_row, left_col = upper_left.to(self.env_model.ROW_COL)
lower_row, right_col = lower_right.to(self.env_model.ROW_COL)
plt.matshow(self.expandedgrid[upper_row:lower_row+1,left_col:right_col+1])
print((upper_row, lower_row), (left_col,right_col))
#print(waypoints.to(env_model.COL_ROW))
#plt.scatter(*waypoints.to(env_model.COL_ROW), c='r')
plt.show()
def drawsolution(self, rawpoints):
np_rawpoints = GeoPolygon(self.env_model.ROW_COL, *np.array(rawpoints).transpose())
plt.matshow(self.env_model.dataset)
#plt.scatter(*waypoints.to(env_model.COL_ROW), c='r')
plt.scatter(*np_rawpoints.to(self.env_model.COL_ROW), c='b')
plt.show()
def addcount(self):
self.counter += 1
if self.counter % 1000 == 0:
print self.counter
if self.counter % self.counter_interval == 0:
self.draw()
def add(self, state, cost):
self.expanded.append(np.array(state))
self.expandedgrid[state] = cost
class MeshViz:
def __init__(self, notebook=False):
self.notebook = notebook
if notebook:
output_notebook()
def viz(self, mesh, x=None, y=None, palette="Spectral11", viz=True, type="line"):
dh, dw = mesh.shape
size = max(dh, dw)
self.mesh = mesh
self.dh = dh
self.dw = dw
self.p = figure(webgl=True, title="MD2", x_axis_label='x', y_axis_label='y', x_range=[0, size], y_range=[0, size])
self.p.image(image=[mesh[::-1, :]], x=0, y=0, dw=dw, dh=dh, palette=palette)
if not x is None:
if type=="line":
self.p.line(x, self.dh - np.array(y), line_color="green", line_width=3)
else:
self.p.circle(x, self.dh - np.array(y), fill_color="yellow", line_color="black", size=10)
if self.notebook and viz:
self.t = show(self.p, notebook_handle = self.notebook)
else:
#self.t = show(self.p)
pass
def show(self):
self.t = show(self.p, notebook_handle = self.notebook)
def vizpoints(self, x, y):
print(x)
self.p.circle(y, self.dh - np.array(x), fill_color="yellow", size=10)
push_notebook(handle=self.t)
class MeshVizM:
def __init__(self):
pass
def viz(self, mesh, x=None, y=None):
plt.matshow(mesh)
plt.show()
if __name__ == '__main__':
MeshViz().viz(np.zeros([4,4])) | mit |
mhue/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
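# Added illustration (a sketch, not an original test): the JL bound grows only
# logarithmically in the number of samples, e.g.
#
#   johnson_lindenstrauss_min_dim(1e6, eps=0.5)               # a few hundred
#   johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])  # grows fast as eps shrinks
#
# (the exact integers depend on the constants in the sklearn implementation).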
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of the Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
sarunya-w/CS402-PROJECT | Project/parallel_forest/v01/dataset_pickle.py | 1 | 7880 | """
GNU GENERAL PUBLIC LICENSE Version 2
Created on Tue Oct 14 18:52:01 2014
@author: Wasit
"""
import numpy as np
import os
#from PIL import Image
#from scipy.ndimage import filters
try:
import json
except ImportError:
import simplejson as json
#1800
#num_img=100
#spi=5
#
#rootdir="dataset"
#mrec=64
#mtran=64
#margin=mrec+mtran
class dataset:
def __init__(self,index=0):
'''
To create and initialise
self.dimtheta--(m)dimension of theta. theta is a column vector
self.size------(n)number of samples in the root bag
self.I---------preprocessed data
self.samples---the matrix which has size of [(p+1)xn],
where p is the size of the vector that identifies the location
of a sample in self.I.
Note that the first row of self.samples is the label
'''
#1 self.cmax: maximum number of classes
#2 self.spi: number of samples per image [removed]
#3 self.theta_dim: the number of elements in a theta (a number of parameter in theta)
#4 self.size: number of all samples in the root bag
#5 self.I: the data
#6 self.samples: samples[x]=[class]
#7 self.theta_range: range of theta for generating value in getParam()
'''
Example: In order to extract LBP features, a possible setup is theta_dim=5,
where 4 dimensions are used to indicate the 2 corners of the rectangular window.
The last dimension represents the bin of the LBP histogram.
Then we can set theta=[r1, c1, r2, c2, bin]^T
In this particular case |theta| = 5. The theta dimension is called "theta_dim".
In getParam() the random proposals are generated by a random function within a certain range, which is called "theta_range".
#3 self.theta_dim:
# r1,r2 {margin~rmax-margin},
# c1,c2 {margin~cmax-margin},
# bin {0~3}
# L1(r1c1)----L2(r1c2)
# | |
# L3(r2c1)----L4(r2c2)
'''
import pickle
self.index=index
self.path='training/dataset%02d.pic'%(self.index)
pickleFile = open(self.path, 'rb')
self.clmax,self.theta_dim,self.theta_range,self.size,self.samples,self.I = pickle.load(pickleFile)
if self.samples is None:
self.samples=np.zeros(self.I.shape[0],dtype=np.uint32)
pickleFile.close()
def __str__(self):
return '\tdataset_pickle: path=./"%s" cmax=%d, theta_dim=%d, theta_range=%d \n\
\tsize=%d, label.shape=%s, I.shape=%s'\
%(self.path,self.clmax,self.theta_dim,self.theta_range,self.size,self.samples.shape,self.I.shape)
def __del__(self):
del self.clmax
del self.theta_dim
del self.theta_range
del self.size
del self.samples#samples contains only label
del self.I
def getX(self):
'''
input:
void
output:
[1D ndarray dtype=np.uint32]
'''
return np.random.permutation(self.size)
def getL(self,x):
'''
input:
[1D ndarray dtype=np.uint32]
output:
[1D ndarray dtype=np.uint32]
'''
return self.samples[x]
def setL(self,x,L):
'''
input:
x: [1D ndarray dtype=np.uint32]
L: [1D ndarray dtype=np.uint32]
'''
self.samples[x]=L
###here
def getIs(self,thetas,x):
'''
input:
x: [1D ndarray dtype=np.uint32]\n
thetas: [2D ndarray float]
output:
[1D ndarray dtype=float]
Description:
In spiral case, it uses only first row of the thetas
'''
#dataset.getParam() calls this
#theta and x have same number of column
#3 self.theta_dim: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]^T
# r1,r2 {margin~rmax-margin},
# c1,c2 {margin~cmax-margin},
# bin {0~3}
# L1(r1c1)----L2(r1c2)
# | |
# L3(r2c1)----L4(r2c2)
##########
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
# r1=self.samples[2,x]+thetas[0,:]
# c1=self.samples[3,x]+thetas[1,:]
# r2=self.samples[2,x]+thetas[2,:]
# c2=self.samples[3,x]+thetas[3,:]
# bins=thetas[self.theta_dim-1,:]
# f=np.zeros(len(x))
# for i,ix in enumerate(x):
# img=self.samples[1,ix]
# L1=self.I[img][r1[i],c1[i],bins[i]]
# L2=self.I[img][r1[i],c2[i],bins[i]]
# L3=self.I[img][r2[i],c1[i],bins[i]]
# L4=self.I[img][r2[i],c2[i],bins[i]]
# f[i]=float(L4+L1-L2-L3)
##need to check
f=np.zeros(len(x))
for i in xrange(len(x)):
f[i]=self.I[x[i],thetas[i,0]]
return f
def getI(self,theta,x):
'''
input:
x: [1D ndarray dtype=np.uint32]\n
theta: [1D ndarray float]
output:
[1D ndarray dtype=float]
Description:
In spiral case, it uses only first row of the thetas
'''
#engine.getQH() call this
##original
# r1=self.samples[2,x]+theta[0]
# c1=self.samples[3,x]+theta[1]
# r2=self.samples[2,x]+theta[2]
# c2=self.samples[3,x]+theta[3]
# bins=theta[self.theta_dim-1]
# f=np.zeros(len(x))
# for i,ix in enumerate(x):
# img=self.samples[1,ix]
# L1=self.I[img][r1[i],c1[i],bins]
# L2=self.I[img][r1[i],c2[i],bins]
# L3=self.I[img][r2[i],c1[i],bins]
# L4=self.I[img][r2[i],c2[i],bins]
# f[i]=float(L4+L1-L2-L3)
# return f
f=np.zeros(len(x))
f=self.I[x[:],theta[0]]
return f
def getParam(self,x):
'''
input:
x: [1D ndarray dtype=np.uint32]
output:
thetas: [2D ndarray float] rmax=len(x), cmax=theta_dim
taus: [1D ndarray dtype=np.uint32]
Description:
In spiral case, it uses only first row of the thetas
'''
#3 self.theta_dim: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
n_proposal=100
if len(x)>n_proposal:
x=np.random.permutation(x)[:n_proposal]
#ux=np.random.randint(-mtran,mtran,size=len(x))
#uy=np.random.randint(-mtran,mtran,size=len(x))
#hx=np.random.randint(8,mrec,size=len(x))
#hy=np.random.randint(8,mrec,size=len(x))
#bins=np.random.randint(0,self.dim_bin,size=len(x))
thetas=np.zeros((len(x),self.theta_dim))
thetas[:,0]=np.random.randint(0,self.theta_range,size=len(x))
thetas.astype(int)
taus = self.getIs(thetas, x)
return thetas,taus
def show(self):
#show dataset
print self.samples
if __name__ == '__main__':
# import matplotlib.pyplot as plt
dset=dataset()
print dset
x=dset.getX()
# print("number of images: {}".format(len(dset.I)))
# markers=['ko','ro','go','bo','po']
# for i in xrange(len(dset.jsonfiles)):
# f=open(dset.jsonfiles[i],"r")
# js=json.loads(f.read())
# f.close()
# img_path= rootdir + js['path'][1:]
# print(img_path)
# im=np.array(Image.open(img_path).convert('L'))
# plt.hold(False)
# plt.imshow(im)
# plt.hold(True)
# for j in range(dset.size):
# #samples[x]=[0_class,1_img, 2_row, 3_column]^T
# if dset.samples[1,j]==i:
# plt.plot(dset.samples[3,j],dset.samples[2,j],markers[dset.samples[0,j]])
# plt.set_cmap('gray')
# plt.show()
# plt.ginput()
# plt.close('all')
#-- | mit |
mgahsan/QuantEcon.py | examples/lakemodel_example.py | 3 | 3436 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 27 18:08:44 2015
Author: David Evans
Example Usage of LakeModel.py
"""
import numpy as np
import matplotlib.pyplot as plt
import LakeModel
import pandas as pd
pd.set_option('display.mpl_style', 'default') # Make the graphs a bit prettier
#Initialize Parameters
alpha = 0.013
lamb = 0.283#0.2486
b = 0.0124
d = 0.00822
g = b-d
N0 = 150.
e0 = 0.92
u0 = 1-e0
T = 50
LM = LakeModel.LakeModel(lamb,alpha,b,d)
#Find steady state
xbar = LM.find_steady_state()
#simulate stocks for T periods
E0 = e0*N0
U0 = u0*N0
X_path = np.vstack( LM.simulate_stock_path([E0,U0],T) )
plt.figure(figsize=[10,9])
plt.subplot(3,1,1)
plt.plot(X_path[:,0])
plt.title(r'Employment')
plt.subplot(3,1,2)
plt.plot(X_path[:,1])
plt.title(r'Unemployment')
plt.subplot(3,1,3)
plt.plot(X_path.sum(1))
plt.title(r'Labor Force')
plt.tight_layout()
plt.savefig('example_stock_path.png')
#simulate rates for T periods
x_path = np.vstack( LM.simulate_rate_path([e0,u0],T) )
plt.figure(figsize=[10,6])
plt.subplot(2,1,1)
plt.plot(x_path[:,0])
plt.hlines(xbar[0],0,T,'r','--')
plt.title(r'Employment Rate')
plt.subplot(2,1,2)
plt.plot(x_path[:,1])
plt.hlines(xbar[1],0,T,'r','--')
plt.title(r'Unemployment Rate')
plt.tight_layout()
plt.savefig('example_rate_path.png')
#Simulate a single agent
T = 5000
A = LakeModel.LakeModelAgent(lamb,alpha)
pi_bar = A.compute_ergodic().flatten()
sHist = np.hstack(A.simulate(1,T))
pi_u = np.cumsum(sHist)/(np.arange(T) + 1.) # time spent in unemployment after T periods
pi_e = 1- pi_u #time spent employed
plt.figure(figsize=[10,6])
plt.subplot(2,1,1)
plt.plot(range(50,T),pi_e[50:])
plt.hlines(pi_bar[0],0,T,'r','--')
plt.title('Percent of Time Employed')
plt.subplot(2,1,2)
plt.plot(range(50,T),pi_u[50:])
plt.hlines(pi_bar[1],0,T,'r','--')
plt.xlabel('Time')
plt.title('Percent of Time Unemployed')
plt.tight_layout()
plt.savefig('example_averages.png')
#==============================================================================
# Now add McCall Search Model
#==============================================================================
from scipy.stats import norm
#using quaterly data
alpha_q = (1-(1-alpha)**3)
gamma = 1.
logw_dist = norm(np.log(20.),1)
w = np.linspace(0.,175,201)# wage grid
#compute probability of each wage level
cdf = logw_dist.cdf(np.log(w))
pdf = cdf[1:]-cdf[:-1]
pdf /= pdf.sum()
w = (w[1:] + w[:-1])/2 # midpoints of the wage grid, one value per probability bin
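# (added sketch, not in the original example) quick sanity check of the
# discretization above: one wage midpoint per probability mass, summing to one
assert len(w) == len(pdf) and abs(pdf.sum() - 1.) < 1e-9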
#Find the equilibrium
LME = LakeModel.LakeModel_Equilibrium(alpha_q,gamma,0.99,2.00,pdf,w)
#possible levels of unemployment insurance
cvec = np.linspace(1.,75,25)
T,W,U,EV,pi = map(np.vstack,zip(* [LME.find_steady_state_tax(c) for c in cvec]))
W= W[:]
T = T[:]
U = U[:]
EV = EV[:]
i_max = np.argmax(W)
plt.figure(figsize=[10,6])
plt.subplot(221)
plt.plot(cvec,W)
plt.xlabel(r'$c$')
plt.title(r'Welfare' )
axes = plt.gca()
plt.vlines(cvec[i_max],axes.get_ylim()[0],max(W),'k','-.')
plt.subplot(222)
plt.plot(cvec,T)
axes = plt.gca()
plt.vlines(cvec[i_max],axes.get_ylim()[0],T[i_max],'k','-.')
plt.xlabel(r'$c$')
plt.title(r'Taxes' )
plt.subplot(223)
plt.plot(cvec,pi[:,0])
axes = plt.gca()
plt.vlines(cvec[i_max],axes.get_ylim()[0],pi[i_max,0],'k','-.')
plt.xlabel(r'$c$')
plt.title(r'Employment Rate' )
plt.subplot(224)
plt.plot(cvec,pi[:,1])
axes = plt.gca()
plt.vlines(cvec[i_max],axes.get_ylim()[0],pi[i_max,1],'k','-.')
plt.xlabel(r'$c$')
plt.title(r'Unemployment Rate' )
plt.tight_layout()
plt.savefig('welfare_plot.png') | bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/shapes_and_collections/ellipse_demo.py | 1 | 1370 | """
============
Ellipse Demo
============
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
NUM = 250
ells = [Ellipse(xy=np.random.rand(2) * 10,
width=np.random.rand(), height=np.random.rand(),
angle=np.random.rand() * 360)
for i in range(NUM)]
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(np.random.rand())
e.set_facecolor(np.random.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
pltshow(plt)
| mit |
evodify/genomic-analyses_in_apache-spark | vcf_filtering_tutorial/Genomics&Spark.py | 1 | 31681 | # Databricks notebook source
# MAGIC %md # Processing genomic data in Spark
# MAGIC
# MAGIC *You can import this notebook from [here](https://github.com/evodify/genomic-analyses_in_apache-spark/tree/master/vcf_filtering_tutorial).*
# MAGIC
# MAGIC ## [Big Data: Astronomical or Genomical?](http://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1002195)
# MAGIC
# MAGIC ### Abstract
# MAGIC
# MAGIC Genomics is a Big Data science and is going to get much bigger, very soon, but it is not known whether the needs of genomics will exceed other Big Data domains. Projecting to the year 2025, we compared genomics with three other major generators of Big Data: astronomy, YouTube, and Twitter. Our estimates show that genomics is a “four-headed beast”—it is either on par with or the most demanding of the domains analyzed here in terms of data acquisition, storage, distribution, and analysis. We discuss aspects of new technologies that will need to be developed to rise up and meet the computational challenges that genomics poses for the near future. Now is the time for concerted, community-wide planning for the “genomical” challenges of the next decade.
# MAGIC
# MAGIC <img src="http://journals.plos.org/plosbiology/article/figure/image?size=large&id=10.1371/journal.pbio.1002195.g001" width="800">
# MAGIC
# MAGIC The plot shows the growth of DNA sequencing both in the total number of human genomes sequenced (left axis) as well as the worldwide annual sequencing capacity (right axis: Tera-basepairs (Tbp), Peta-basepairs (Pbp), Exa-basepairs (Ebp), Zetta-basepairs (Zbps)).
# MAGIC
# MAGIC
# MAGIC <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Historic_cost_of_sequencing_a_human_genome.svg/800px-Historic_cost_of_sequencing_a_human_genome.svg.png" width="800">
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## DNA sequencing
# MAGIC
# MAGIC <img src="https://www.yourgenome.org/sites/default/files/illustrations/process/physical_mapping_STS_yourgenome.png" width="800">
# COMMAND ----------
# MAGIC %md
# MAGIC ## Alligning millions of small DNA sequences (reads) to a reference genome
# MAGIC
# MAGIC <img src="https://github.com/evodify/genomic-analyses_in_apache-spark/raw/master/vcf_filtering_tutorial/genome_read_mapping.png" width="1000">
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # VCF
# MAGIC
# MAGIC <img src="https://hail.is/docs/stable/_images/hail-vds-rep.png" width="600">
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC // This allows easy embedding of publicly available information into any other notebook
# MAGIC // when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL").
# MAGIC // Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Variant_Call_Format",500))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Hail
# MAGIC
# MAGIC [Hail](https://hail.is) is an open-source, scalable framework for exploring and analyzing genomic data. Its functionality is exposed through **Python** and backed by distributed algorithms built on top of **Apache Spark** to efficiently analyze gigabyte-scale data on a laptop or terabyte-scale data on a cluster, without the need to manually chop up data or manage job failures. Users can script pipelines or explore data interactively through **Jupyter notebooks** that flow between Hail with methods for genomics, *PySpark* with scalable *SQL* and *machine learning algorithms*, and *pandas* with *scikit-learn* and *Matplotlib* for results that fit on one machine. Hail also provides a flexible domain language to express complex quality control and analysis pipelines with concise, readable code.
# MAGIC
# MAGIC #### Scaling Genetic Data Analysis with Apache Spark
# MAGIC [](https://www.youtube.com/embed/pyeQusIN5Ao)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## This Notebook is based on the tutorial [Analyzing 1000 Genomes with Spark and Hail](https://docs.databricks.com/spark/latest/training/1000-genomes.html)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Cluster setup
# MAGIC
# MAGIC First download Hail's Python and Java libraries to your computer:
# MAGIC
# MAGIC https://storage.googleapis.com/hail-common/hail-tutorial-databricks.jar
# MAGIC
# MAGIC https://storage.googleapis.com/hail-common/hail-devel-py2.7-databricks.egg
# MAGIC
# MAGIC Then on the Databricks interface, navigate to `Workspace > Users > Username` and select `Import` from the Username drop-down menu. At the bottom of `Import Notebooks` window, click the link in `(To import a library, such as a jar or egg,`_`click here`_`)`. Upload both the .jar and .egg files using this interface, using any names you like. Make sure that the option `Attach automatically to all clusters` is checked in the success dialog.
# MAGIC
# MAGIC Next click the `Clusters` icon on the left sidebar and then `+Create Cluster`. For `Apache Spark Version`, select `Spark 2.0 (Auto-updating, Scala 2.11)`. Note that Hail won't work with Scala 2.10! In the Databricks cluster creation dialog, click `Show advanced settings` at bottom and then on the `Spark` tab, and paste the text below into the `Spark config` box.
# MAGIC
# MAGIC ```
# MAGIC spark.hadoop.io.compression.codecs org.apache.hadoop.io.compress.DefaultCodec,is.hail.io.compress.BGzipCodec,org.apache.hadoop.io.compress.GzipCodec
# MAGIC spark.sql.files.openCostInBytes 1099511627776
# MAGIC spark.sql.files.maxPartitionBytes 1099511627776
# MAGIC spark.hadoop.mapreduce.input.fileinputformat.split.minsize 1099511627776
# MAGIC spark.hadoop.parquet.block.size 1099511627776```
# MAGIC Start the cluster and attach this notebook to it by clicking on your cluster name in menu `Detached` at the top left of this workbook. Now you're ready to Hail!
# COMMAND ----------
from hail import *
hc = HailContext(sc)
# COMMAND ----------
# MAGIC %md Let's import some Python libraries for use throughout the tutorial.
# COMMAND ----------
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
from math import log, isnan
import seaborn
# COMMAND ----------
# MAGIC %md ## Import data
# MAGIC
# MAGIC We must first import variant data into Hail's internal format of Variant Dataset (VDS). We use the [import_vcf](https://hail.is/hail/hail.HailContext.html#hail.HailContext.import_vcf) method on [HailContext](https://hail.is/hail/hail.HailContext.html) to load a VCF file into Hail.
# MAGIC
# MAGIC It is recommended to load a block-compressed VCF (`.vcf.bgz`) which enables Hail to read the file in parallel. Reading files that have not been block-compressed (`.vcf`, `.vcf.gz`) is _significantly_ slower and should be avoided (though often `.vcf.gz` files are in fact block-compressed, in which case renaming to `.vcf.bgz` solves the problem).
# MAGIC
# MAGIC Unfortunately, I was not able to load a `.bgz`-compressed file. It worked for the existing human dataset, which is bgz-compressed, but not for my data (see below).
# MAGIC
# MAGIC ### Cbp data
# MAGIC As a training data set, I use a subset of 1% from my unpublished genomic data on [*Capsella bursa-pastoris*](https://en.wikipedia.org/wiki/Capsella_bursa-pastoris) (hereafter Cbp).
# MAGIC
# MAGIC ### Human data
# MAGIC
# MAGIC You can use publicly avaliable human data by uncommenting the three line below. Jump to Cmd 19.
# COMMAND ----------
# MAGIC %md ## Download the data
# COMMAND ----------
# MAGIC %sh
# MAGIC wget https://github.com/evodify/genomic-analyses_in_apache-spark/raw/master/vcf_filtering_tutorial/Cbp31_SNPs_test0.01.vcf.gz # download the test VCF file
# MAGIC wget https://github.com/evodify/genomic-analyses_in_apache-spark/raw/master/vcf_filtering_tutorial/Cbp31_annot.csv # download the test annotation file
# MAGIC gunzip Cbp31_SNPs_test0.01.vcf.gz # uncomress the gzip file.
# COMMAND ----------
# MAGIC %sh pwd && ls -l
# COMMAND ----------
# MAGIC %md ## Move the downloaded data to DBFS
# COMMAND ----------
dbutils.fs.mkdirs("dbfs:/FileStore/tables/Cbp") # create a new directory for our data files
# COMMAND ----------
dbutils.fs.mv("file:/databricks/driver/Cbp31_SNPs_test0.01.vcf", "dbfs:/FileStore/tables/Cbp") # move
dbutils.fs.cp("file:/databricks/driver/Cbp31_annot.csv", "dbfs:/FileStore/tables/Cbp") # copy because we will need non-dbfs file for R later
# COMMAND ----------
dbutils.fs.ls("dbfs:/FileStore/tables/Cbp/")
# COMMAND ----------
# MAGIC %md ## Start processing
# COMMAND ----------
vcf_path = '/FileStore/tables/Cbp/Cbp31_SNPs_test0.01.vcf'
annotation_path = '/FileStore/tables/Cbp/Cbp31_annot.csv'
# comment out the line above and uncommend the lines below for human data
# vcf_path = '/databricks-datasets/hail/data-001/1kg_sample.vcf.bgz'
# annotation_path = '/databricks-datasets/hail/data-001/1kg_annotations.txt'
# purcell_5k_path = '/databricks-datasets/hail/data-001/purcell5k.interval_list'
# COMMAND ----------
vds = hc.import_vcf(vcf_path) # bgz import fails even with force_bgz=True. I compressed my files with bgzip from https://github.com/samtools/tabix .
# COMMAND ----------
# MAGIC %md This method produced a [VariantDataset](https://hail.is/hail/hail.VariantDataset.html), Hail's primary representation of genomic data. Following that link to Hail's python API documentation will let you see the myriad methods it offers. We will use but a few of them in this tutorial. We next use its [split_multi](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.split_multi) to split multi-allelic variants into biallelic variants. For example, the variant `1:1000:A:T,C` would become two variants: `1:1000:A:T` and `1:1000:A:C`.
# COMMAND ----------
vds = vds.split_multi()
# COMMAND ----------
# MAGIC %md We next use the [annotate_samples_table](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.annotate_samples_table) method to load phenotypic information on each sample from the sample annotations file.
# MAGIC
# MAGIC Here `annotation_path` refers to the sample annotation data file, whose first few lines are:
# MAGIC ```
# MAGIC Sample Population
# MAGIC DL174 ASI
# MAGIC GY37 ASI
# MAGIC HJC419 ASI
# MAGIC 12.4 EUR
# MAGIC 13.16 EUR
# MAGIC 16.9 EUR
# MAGIC BEL5 EUR
# MAGIC ```
# MAGIC
# MAGIC The `root` argument says where to put this data. For sample annotations, the root must start with `sa` followed by a `.` and the rest is up to you, so let's use `sa.myAnnot`.
# MAGIC
# MAGIC The `sample_expr` argument indicates that the sample ID is in column `Sample`.
# MAGIC
# MAGIC The object `TextTableConfig` allows users to provide information about column data types, header existence, comment characters, and field delimiters. For example, one could declare 'Population: String'. `impute=True` will infer column types automatically.
# COMMAND ----------
dbutils.fs.head(annotation_path)
# COMMAND ----------
vds = vds.annotate_samples_table(annotation_path,
root='sa.myAnnot',
sample_expr='Sample',
config=TextTableConfig(impute=True))
# COMMAND ----------
# MAGIC %md Lastly, we'll [write](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.write) the dataset to disk so that all future computations begin by reading in the fast VDS rather than the slow VCF.
# COMMAND ----------
out_path = '/cbp.vds'
vds.write(out_path, overwrite=True)
# COMMAND ----------
# MAGIC %md ## Start exploring
# MAGIC
# MAGIC Now we're ready to start exploring! We will read back in the VDS we wrote to disk:
# COMMAND ----------
vds = hc.read(out_path)
# COMMAND ----------
# MAGIC %md First, we'll print some statistics about the size of the dataset using [count](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.count):
# COMMAND ----------
print(vds.count())
# COMMAND ----------
# MAGIC %md If the Boolean parameter `genotypes` is set to `True`, the overall call rate across all genotypes is computed as well:
# COMMAND ----------
vds.count(genotypes=True)
# COMMAND ----------
# MAGIC %md So the call rate before any QC filtering is about 84.67%.
# MAGIC
# MAGIC Let's print the types of all annotations.
# MAGIC
# MAGIC Variant annotations:
# COMMAND ----------
print(vds.variant_schema)
# COMMAND ----------
# MAGIC %md Sample annotations:
# COMMAND ----------
print(vds.sample_schema)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC We have just printed the sample and variant structure above. Global structure is empty.
# MAGIC
# MAGIC To recall the structure:
# MAGIC
# MAGIC <img src="https://hail.is/docs/stable/_images/hail-vds-rep.png" width="600">
# MAGIC
# MAGIC Also, note the annotations imported from the original VCF, as well as the sample annotations added above. Notice how the sample annotation loaded above (`Population`) is nested inside `sa.myAnnot`, as defined by the `root` option in [annotate_samples_table](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.annotate_samples_table).
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Next we'll add some global annotations including the list of populations that are present in our dataset and the number of samples in each population, using the Hail expression language and the [query_samples](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.query_samples) method. The codings are:
# MAGIC
# MAGIC - ASI = Asian
# MAGIC - EUR = European
# MAGIC - ME = Middle Eastern
# MAGIC
# MAGIC We'll first build up a list of query expressions, then evaluate them all at once to save time.
# COMMAND ----------
expressions = ['samples.map(s => sa.myAnnot.Population).collect().toSet']
queries = vds.query_samples(expressions)
print('populations = %s' % queries[0])
print('total samples = %s' % vds.num_samples)
# COMMAND ----------
# MAGIC %md Now it's easy to count samples by population using the [counter](https://hail.is/expr_lang.html#counter) aggregator:
# COMMAND ----------
counter = vds.query_samples('samples.map(s => sa.myAnnot.Population).counter()')[0]
for x in counter:
print('population %s found %s times' % (x.key, x.count))
# COMMAND ----------
# MAGIC %md ## Quality control (QC)
# MAGIC
# MAGIC VCF file contains many annotations scores that define the quality of genotypes as well as quality of a variant.
# MAGIC
# MAGIC ### Filter genotypes
# MAGIC
# MAGIC Let's filter genotypes based on genotype quality (GQ) and read coverage (DP).
# MAGIC
# MAGIC Here `g` is genotype, `v` is variant, `s` is sample, and annotations are accessible via `va`, `sa`, and `global`.
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC // This allows easy embedding of publicly available information into any other notebook
# MAGIC // when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL").
# MAGIC // Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Phred_quality_score",500))
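# COMMAND ----------
# MAGIC %md
# MAGIC As a quick illustrative aside (a sketch added here, not part of the filtering pipeline): GQ is Phred-scaled, so a genotype's estimated error probability is roughly 10^(-GQ/10). The small calculation below shows why the `g.gq >= 20` cutoff used in the next cell keeps genotypes with roughly a 1% (or lower) estimated error; `g.dp >= 10` is simply a minimum read-depth requirement.
# COMMAND ----------
# Illustrative only: convert Phred-scaled genotype qualities to approximate error probabilities.
for gq in [10, 20, 30, 40]:
    print('GQ = %d -> estimated error probability ~ %.4f' % (gq, 10 ** (-gq / 10.0)))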
# COMMAND ----------
filter_condition_gDP_gGQ = 'g.dp >= 10 && g.gq >= 20'
vds_gDP_gGQ = vds.filter_genotypes(filter_condition_gDP_gGQ)
# COMMAND ----------
vds_gDP_gGQ.count(genotypes=True)
# COMMAND ----------
# MAGIC %md Now the call rate is about 50%, so the genotype filter removed roughly 35% of all genotypes (the call rate dropped from about 85% to 50%). Filtering out a genotype is equivalent to setting the genotype call to missing.
# MAGIC
# MAGIC Having removed suspect genotypes, let's next remove variants with low call rate and then calculate summary statistics per sample with the [sample_qc](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.sample_qc) method.
# COMMAND ----------
vds_gDP_gGQ_vCR = (vds_gDP_gGQ
.filter_variants_expr('gs.fraction(g => g.isCalled) >= 0.50')
.sample_qc())
# COMMAND ----------
# MAGIC %md
# MAGIC Check how many variants retained after filtering.
# COMMAND ----------
vds_gDP_gGQ_vCR.count(genotypes=True)
# COMMAND ----------
# MAGIC %md ### Filter samples
# COMMAND ----------
# MAGIC %md The call rate for each variant is calculated using the `fraction` [aggregable](https://hail.is/expr_lang.html#aggregables) on the genotypes `gs`. [sample_qc](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.sample_qc) adds a number of statistics to sample annotations:
# COMMAND ----------
print(vds_gDP_gGQ_vCR.sample_schema)
# COMMAND ----------
# MAGIC %md Let's export these sample annotations to a text file and take a look at them:
# COMMAND ----------
vds_gDP_gGQ_vCR.export_samples('file:///sampleqc.txt', 'Sample = s.id, sa.qc.*')
# COMMAND ----------
# MAGIC %sh
# MAGIC head /sampleqc.txt | cut -f 1-8 | column -t
# COMMAND ----------
# MAGIC %md We can further analyze these results locally using Python's [matplotlib](http://matplotlib.org/) library. Below is an example plot of three variables (call rate, mean depth, and mean GQ), along with the code that generates the plot.
# COMMAND ----------
sampleqc_table = vds_gDP_gGQ_vCR.samples_keytable().to_pandas()
plt.clf() # clears the entire current figure with all its axes, but leaves the window opened, such that it may be reused for other plots.
plt.figure(figsize=(7,4)) # figure size in inches; change according to your screen size and resolution
plt.subplot(1, 3, 1)
plt.hist(sampleqc_table["sa.qc.callRate"], bins=np.arange(0.4, 1.01, .05))
plt.xlabel("Call Rate")
plt.ylabel("Frequency")
plt.xlim(0.4, 1)
plt.axvline(.50, color='r')
plt.subplot(1, 3, 2)
plt.hist(sampleqc_table["sa.qc.dpMean"], bins=np.arange(20, 80, 10))
plt.xlabel("Mean depth")
plt.ylabel("Frequency")
plt.xlim(10, 80)
plt.axvline(10, color='r')
plt.subplot(1, 3, 3)
plt.hist(sampleqc_table["sa.qc.gqMean"], bins=np.arange(20, 100, 10))
plt.xlabel("Mean Sample GQ")
plt.ylabel("Frequency")
plt.axvline(30, color = 'r')
plt.xlim(0, 100)
plt.tight_layout()
plt.show()
display()
# COMMAND ----------
# MAGIC %md
# MAGIC You can remove samples that are outliers in the plots above, where cutoffs are given by the red lines. But there are no outliers here. If we had to filter, we could do this step:
# COMMAND ----------
vds_gDP_gGQ_vCR_sDP_sGT = (vds_gDP_gGQ_vCR
.annotate_samples_vds(vds_gDP_gGQ_vCR, code = 'sa.qc = vds.qc' )
                           .filter_samples_expr('sa.qc.callRate >= 0.50 && sa.qc.dpMean >= 10 && sa.qc.gqMean >= 20'))  # call-rate, mean-depth and mean-GQ cutoffs (red lines in the plots above)
# COMMAND ----------
# MAGIC %md As before, we can count the number of samples that remain in the dataset after filtering. (But nothing has been filtered out here)
# COMMAND ----------
vds_gDP_gGQ_vCR_sDP_sGT.count(genotypes=True)
# COMMAND ----------
# MAGIC %md ### Filter variants
# MAGIC
# MAGIC We now have `vds_gDP_gGQ_vCR_sDP_sGT`, a VDS where low-quality genotypes and samples have been removed.
# MAGIC
# MAGIC Let's use the [variant_qc](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.variant_qc) method to start exploring variant metrics:
# COMMAND ----------
vds_gDP_gGQ_vCR_sDP_sGT = vds_gDP_gGQ_vCR_sDP_sGT.variant_qc()
print(vds_gDP_gGQ_vCR_sDP_sGT.variant_schema)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC Next, we will filter variants following the [Best Practices GATK recommendations](https://gatkforums.broadinstitute.org/gatk/discussion/2806/howto-apply-hard-filters-to-a-call-set).
# MAGIC
# MAGIC These recommendations are for human data, but our data is not human, so the distributions of the quality statistics will differ from what is expected for humans. (Explaining why is beyond the scope of this tutorial.)
# MAGIC
# MAGIC Let's have a look at the distribution of different variant quality statistics:
# MAGIC
# MAGIC - QD - variant confidence standardized by depth.
# MAGIC
# MAGIC This annotation puts the variant confidence QUAL score into perspective by normalizing for the amount of coverage available. Because each read contributes a little to the QUAL score, variants in regions with deep coverage can have artificially inflated QUAL scores, giving the impression that the call is supported by more evidence than it really is. To compensate for this, we normalize the variant confidence by depth, which gives us a more objective picture of how well supported the call is.
# MAGIC
# MAGIC - MQ - Mapping quality of a SNP.
# MAGIC
# MAGIC - FS - strand bias in support for REF vs ALT allele calls.
# MAGIC
# MAGIC Strand bias is a type of sequencing bias in which one DNA strand is favored over the other, which can result in incorrect evaluation of the amount of evidence observed for one allele vs. the other. The FisherStrand annotation is one of several methods that aims to evaluate whether there is strand bias in the data. It uses Fisher's Exact Test to determine if there is strand bias between forward and reverse strands for the reference or alternate allele. The output is a Phred-scaled p-value. The higher the output value, the more likely there is to be bias. More bias is indicative of false positive calls.
# MAGIC
# MAGIC - SOR - sequencing bias in which one DNA strand is favored over the other
# MAGIC
# MAGIC Strand bias is a type of sequencing bias in which one DNA strand is favored over the other, which can result in incorrect evaluation of the amount of evidence observed for one allele vs. the other. It is used to determine if there is strand bias between forward and reverse strands for the reference or alternate allele. The reported value is ln-scaled.
# MAGIC
# MAGIC - MQRankSum - Rank sum test for mapping qualities of REF vs. ALT reads.
# MAGIC
# MAGIC This variant-level annotation compares the mapping qualities of the reads supporting the reference allele with those supporting the alternate allele. The ideal result is a value close to zero, which indicates there is little to no difference. A negative value indicates that the reads supporting the alternate allele have lower mapping quality scores than those supporting the reference allele. Conversely, a positive value indicates that the reads supporting the alternate allele have higher mapping quality scores than those supporting the reference allele.
# MAGIC
# MAGIC - ReadPosRankSum - do all the reads support a SNP call tend to be near the end of a read.
# MAGIC
# MAGIC The ideal result is a value close to zero, which indicates there is little to no difference in where the alleles are found relative to the ends of reads. A negative value indicates that the alternate allele is found at the ends of reads more often than the reference allele. Conversely, a positive value indicates that the reference allele is found at the ends of reads more often than the alternate allele.
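# COMMAND ----------
# MAGIC %md
# MAGIC As an illustrative sketch (toy read counts, not values from our dataset): FS is a Phred-scaled p-value from Fisher's exact test on a 2x2 table of forward/reverse read counts supporting the REF and ALT alleles. A strand-balanced site gives an FS near 0, while a strongly strand-biased site gives a large FS. The snippet below assumes scipy is available on the cluster.
# COMMAND ----------
# Toy illustration of how an FS-like value is computed; not part of the filtering pipeline.
from scipy.stats import fisher_exact
from math import log10
# rows = REF, ALT; columns = forward-strand reads, reverse-strand reads (made-up counts)
balanced = [[25, 24], [23, 26]]  # ALT supported by both strands
biased = [[25, 24], [45, 2]]     # ALT supported almost only by forward-strand reads
for name, table in [('balanced', balanced), ('biased', biased)]:
    odds, p = fisher_exact(table)
    print('%s site: p = %.3g, Phred-scaled (FS-like) = %.1f' % (name, p, -10 * log10(p)))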
# COMMAND ----------
# MAGIC %md
# MAGIC We've once again used matplotlib to make histograms of these siz summary statistics.
# COMMAND ----------
variantqc_table = vds_gDP_gGQ_vCR_sDP_sGT.variants_keytable().to_pandas()
plt.clf() # clears the entire current figure with all its axes, but leaves the window opened, such that it may be reused for other plots.
plt.figure(figsize=(7,7)) # figure size in inches; change according to your screen size and resolution
plt.subplot(3, 2, 1)
variantgq_MQ = variantqc_table["va.info.MQ"]
plt.hist(variantgq_MQ.dropna(), bins = np.arange(0, 101, 2)) # It is important to add dropna() to skip NA values. Otherwise, the script won't work.
plt.xlabel("MQ")
plt.ylabel("Frequency")
plt.xlim(0, 100)
plt.axvline(30, color = 'r')
plt.subplot(3, 2, 2)
variantgq_SOR = variantqc_table["va.info.SOR"]
plt.hist(variantgq_SOR.dropna(), bins = np.arange(0, 8, 0.2))
plt.xlabel("SOR")
plt.ylabel("Frequency")
plt.xlim(0, 8)
plt.axvline(4, color = 'r')
plt.subplot(3, 2, 3)
variantgq_QD = variantqc_table["va.info.QD"]
plt.hist(variantgq_QD.dropna(), bins = np.arange(0, 40, 1))
plt.xlabel("QD")
plt.ylabel("Frequency")
plt.xlim(0, 40)
plt.axvline(2, color = 'r')
plt.subplot(3, 2, 4)
variantgq_FS = variantqc_table["va.info.FS"]
plt.hist(variantgq_FS.dropna(), bins = np.arange(0, 100, 2))
plt.xlabel("FS")
plt.ylabel("Frequency")
plt.xlim(0, 100)
plt.axvline(60, color = 'r')
plt.subplot(3, 2, 5)
variantgq_MQRankSum = variantqc_table["va.info.MQRankSum"]
plt.hist(variantgq_MQRankSum.dropna(), bins = np.arange(-20, 20, 1))
plt.xlabel("MQRankSum")
plt.ylabel("Frequency")
plt.xlim(-25, 25)
plt.axvline(-20, color = 'r')
plt.subplot(3, 2, 6)
variantgq_ReadPosRankSum = variantqc_table["va.info.ReadPosRankSum"]
plt.hist(variantgq_ReadPosRankSum.dropna(), bins = np.arange(-20, 20, 0.5))
plt.xlabel("ReadPosRankSum")
plt.ylabel("Frequency")
plt.xlim(-12, 12)
plt.axvline(-10, color = 'r')
plt.axvline(10, color = 'r')
plt.tight_layout()
plt.show()
display()
# COMMAND ----------
# MAGIC %md
# MAGIC Lastly we use the [filter_variants_expr](https://hail.is/hail/hail.VariantDataset.html#hail.VariantDataset.filter_variants_expr) method to keep only those variants that meet the cut-off requirements (red lines in the plots above).
# COMMAND ----------
vds_gDP_gGQ_vCR_sDP_sGT_vFilter = vds_gDP_gGQ_vCR_sDP_sGT.filter_variants_expr('va.info.MQ >= 30.00 && va.info.SOR <= 4.000 && va.info.QD >= 2.00 && va.info.FS <= 60.000 && va.info.MQRankSum >= -20.000 && va.info.ReadPosRankSum >= -10.000 && va.info.ReadPosRankSum <= 10.000')
print('variants before filtering: %d' % vds_gDP_gGQ_vCR_sDP_sGT.count_variants())
print('variants after filtering: %d' % vds_gDP_gGQ_vCR_sDP_sGT_vFilter.count_variants())
# COMMAND ----------
# MAGIC %md Verify the filtering results with plots:
# COMMAND ----------
variantqc_table = vds_gDP_gGQ_vCR_sDP_sGT_vFilter.variants_keytable().to_pandas()
plt.clf() # clears the entire current figure with all its axes, but leaves the window opened, such that it may be reused for other plots.
plt.figure(figsize=(7,7)) # figure size in inches; change according to your screen size and resolution
plt.subplot(3, 2, 1)
variantgq_MQ = variantqc_table["va.info.MQ"]
plt.hist(variantgq_MQ.dropna(), bins = np.arange(0, 101, 2))
plt.xlabel("MQ")
plt.ylabel("Frequency")
plt.xlim(0, 100)
plt.axvline(30, color = 'r')
plt.subplot(3, 2, 2)
variantgq_SOR = variantqc_table["va.info.SOR"]
plt.hist(variantgq_SOR.dropna(), bins = np.arange(0, 8, 0.2))
plt.xlabel("SOR")
plt.ylabel("Frequency")
plt.xlim(0, 8)
plt.axvline(4, color = 'r')
plt.subplot(3, 2, 3)
variantgq_QD = variantqc_table["va.info.QD"]
plt.hist(variantgq_QD.dropna(), bins = np.arange(0, 40, 1))
plt.xlabel("QD")
plt.ylabel("Frequency")
plt.xlim(0, 40)
plt.axvline(2, color = 'r')
plt.subplot(3, 2, 4)
variantgq_FS = variantqc_table["va.info.FS"]
plt.hist(variantgq_FS.dropna(), bins = np.arange(0, 100, 2))
plt.xlabel("FS")
plt.ylabel("Frequency")
plt.xlim(0, 100)
plt.axvline(60, color = 'r')
plt.subplot(3, 2, 5)
variantgq_MQRankSum = variantqc_table["va.info.MQRankSum"]
plt.hist(variantgq_MQRankSum.dropna(), bins = np.arange(-20, 20, 1))
plt.xlabel("MQRankSum")
plt.ylabel("Frequency")
plt.xlim(-25, 25)
plt.axvline(-20, color = 'r')
plt.subplot(3, 2, 6)
variantgq_ReadPosRankSum = variantqc_table["va.info.ReadPosRankSum"]
plt.hist(variantgq_ReadPosRankSum.dropna(), bins = np.arange(-20, 20, 0.5))
plt.xlabel("ReadPosRankSum")
plt.ylabel("Frequency")
plt.xlim(-12, 12)
plt.axvline(-10, color = 'r')
plt.axvline(10, color = 'r')
plt.tight_layout()
plt.show()
display()
# COMMAND ----------
# MAGIC %md ## PCA
# MAGIC
# MAGIC To check if there is any genetic structure, we will use a principal component analysis (PCA).
# COMMAND ----------
vds_pca = (vds_gDP_gGQ_vCR_sDP_sGT_vFilter.pca(scores='sa.pca'))
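# COMMAND ----------
# MAGIC %md
# MAGIC Conceptually, `pca` builds a samples-by-variants matrix of (normalized) alternate-allele counts and computes its leading principal components, so each sample gets a score on PC1, PC2, and so on. The toy NumPy sketch below only illustrates that idea and ignores Hail's exact normalization.
# COMMAND ----------
# Minimal sketch of PCA on a toy genotype matrix (0/1/2 alternate-allele counts); illustration only.
import numpy as np
G = np.array([[0, 1, 2, 0], [0, 1, 2, 1], [2, 1, 0, 2]], dtype=float)  # 3 samples x 4 variants
Gc = G - G.mean(axis=0)  # center each variant
U, s, Vt = np.linalg.svd(Gc, full_matrices=False)
scores = U * s  # per-sample principal component scores
print(scores[:, :2])  # PC1 and PC2 for the three toy samples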
# COMMAND ----------
# MAGIC %md We can then make a Python plot of the samples in PC space colored by population group:
# COMMAND ----------
pca_table = vds_pca.samples_keytable().to_pandas()
colors = {'ASI': 'green', 'EUR': 'red', 'ME': 'blue'}
plt.clf()
plt.figure(figsize=(7,7)) # figure size in inches; change according to your screen size and resolution
plt.scatter(pca_table["sa.pca.PC1"], pca_table["sa.pca.PC2"], c = pca_table["sa.myAnnot.Population"].map(colors), alpha = .7, s=100)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.xlim(-0.7, 0.7)
plt.ylim(-0.7, 0.7)
legend_entries = [mpatches.Patch(color= c, label=myAnnot) for myAnnot, c in colors.items()]
plt.legend(handles=legend_entries, prop={'size': 15})
plt.show()
display()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC This plot reflects the known [population structure in *Capsella bursa-pastoris*](http://onlinelibrary.wiley.com/doi/10.1111/mec.13491/full).
# MAGIC
# MAGIC You can see the location of these samples with some R code:
# COMMAND ----------
# MAGIC %md Install packages that are necessary to produce a geographic map:
# COMMAND ----------
# MAGIC %r
# MAGIC install.packages(c("maps", "rworldmap"))
# COMMAND ----------
# MAGIC %r
# MAGIC library(rworldmap)
# MAGIC library(maps)
# MAGIC
# MAGIC geo <- read.table("/databricks/driver/Cbp31_annot.csv", header = T) # read file not from dbfs
# MAGIC
# MAGIC newmap <- getMap(resolution = "high") # create a map object
# MAGIC
# MAGIC pchS <- c(rep(15, 11), rep(19, 13), rep(17, 7)) # designate genetic clusters with different colours
# MAGIC colS <- c(rep("green", 11), rep("red", 13), rep("blue", 7))
# MAGIC groupsL <- c('ASI', 'EUR', 'ME')
# MAGIC pchL <- c(15, 19, 17)
# MAGIC colL <- c("green", "red", "blue")
# MAGIC
# MAGIC par(mar=c(3, 3, 2, 2), cex=1)
# MAGIC plot(newmap, xlim = c(-40, 140), ylim = c(20, 50), asp = 1, bg='#DCDCDC', lty=3, lwd=0.3, col="#ffffff")
# MAGIC map.axes()
# MAGIC points(x=geo$Longitude, y=geo$Latitude, pch=pchS, cex=1.2, col=colS)
# MAGIC legend("topright", leg=groupsL, pch=pchL, col=colL, ncol=1, pt.cex=1.2, bg="#ffffff")
# COMMAND ----------
# MAGIC %md ## Summary
# MAGIC
# MAGIC Data filtering:
# MAGIC - Filter genotypes
# MAGIC - Filter samples
# MAGIC - Filter variants
# MAGIC
# MAGIC *Variants can be filtered before sample filtering if samples are of greater priority in a study.*
# MAGIC
# MAGIC Such genetic data can be analyzed in various ways. A PCA is just one simple example.
# COMMAND ----------
# MAGIC %md ## Additional
# MAGIC
# MAGIC It is recommended to go through the original [Analysis of 1000 Genomes with Spark and Hail](https://docs.databricks.com/spark/latest/training/1000-genomes.html).
# MAGIC
# MAGIC You can also read [Hail Overview](https://www.hail.is/hail/overview.html), look through the [Hail objects](https://www.hail.is/hail/hail_objects.html) representing many core concepts in genetics, and check out the many Hail functions defined in the [Python API](https://hail.is/hail/api.html).
| mit |
menglin0320/resnet_try | examples/finetune_flickr_style/assemble_data.py | 38 | 3636 | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
| mit |
fzalkow/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
evgchz/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 21 | 13756 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
"""Test csc_row_median actually calculates the median."""
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
JorgeDeLosSantos/NanchiPlot | nanchi/uimpl.py | 1 | 3881 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
import matplotlib.lines as lines
import numpy as np
import wx
class ZoomRectangle(object):
def __init__(self,figure,axes,canvas):
self.canvas = canvas
self.figure = figure
self.axes = axes
self.cline = lines.Line2D([],[], color="#00ff00", ls="--")
def connect(self):
print "connect: ",self.canvas,self.figure,self.axes
self.btpress = self.canvas.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.canvas.mpl_connect("button_release_event", self.on_release)
print self.btpress, self.btrelease
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.canvas.draw()
def on_press(self,event):
#~ print "Press"
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.canvas.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
"Release"
self.canvas.mpl_disconnect(self.motion)
self.canvas.mpl_disconnect(self.btpress)
self.canvas.mpl_disconnect(self.btrelease)
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.canvas.draw()
class FigureCanvas(FigureCanvasWxAgg):
def __init__(self,parent,id,figure,**kwargs):
FigureCanvasWxAgg.__init__(self,parent=parent, id=id, figure=figure,**kwargs)
self.figure = figure
self.axes = self.figure.get_axes()[0]
def disconnect_all(self):
try:
self.mpl_disconnect(self.motion)
self.mpl_disconnect(self.btpress)
self.mpl_disconnect(self.btrelease)
except:
pass
def zoomit(self):
self.cline = lines.Line2D([],[], color="#ff00ff", ls="--", lw=2.0)
self.btpress = self.mpl_connect("button_press_event", self.on_press)
self.btrelease = self.mpl_connect("button_release_event", self.on_release)
self.axes.add_line(self.cline)
def on_motion(self,event):
self.cline.set_xdata([])
self.cline.set_ydata([])
# ---
self.x = event.xdata
self.y = event.ydata
# ---
xdata = [self.x0, self.x0, self.x, self.x, self.x0]
ydata = [self.y0, self.y, self.y, self.y0, self.y0]
# ---
self.cline.set_xdata(xdata)
self.cline.set_ydata(ydata)
# ---
self.draw()
def on_press(self,event):
self.x0 = event.xdata
self.y0 = event.ydata
self.motion = self.mpl_connect("motion_notify_event", self.on_motion)
def on_release(self,event):
self.disconnect_all()
try:
self.cline.remove() # Delete box
except:
self.stop_event_loop()
min_x = min([self.x0, self.x])
max_x = max([self.x0, self.x])
min_y = min([self.y0, self.y])
max_y = max([self.y0, self.y])
self.axes.set_xlim(min_x, max_x)
self.axes.set_ylim(min_y, max_y)
self.draw()
if __name__ == '__main__':
plt.plot([1,2,3,12,1,3])
fig = plt.gcf()
ax = plt.gca()
zr = ZoomRectangle(fig,ax,fig.canvas)
zr.connect()
plt.show()
| mit |
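# --- Editor's sketch (standalone, not part of the files above or below) ---
# A pure-matplotlib version of the press/drag/release pattern used by the
# ZoomRectangle/FigureCanvas classes above: connect to button and motion
# events, update a rubber-band line while dragging, and disconnect on release.
# Runnable on its own; no wxPython backend is assumed.
import matplotlib.pyplot as plt
import matplotlib.lines as lines

fig, ax = plt.subplots()
ax.plot([1, 2, 3, 12, 1, 3])
box = lines.Line2D([], [], color="g", ls="--")
ax.add_line(box)
state = {}

def on_press(event):
    state["x0"], state["y0"] = event.xdata, event.ydata
    state["motion"] = fig.canvas.mpl_connect("motion_notify_event", on_motion)

def on_motion(event):
    if event.xdata is None or state.get("x0") is None:
        return  # pointer left the axes or press happened outside them
    x0, y0, x, y = state["x0"], state["y0"], event.xdata, event.ydata
    box.set_data([x0, x0, x, x, x0], [y0, y, y, y0, y0])
    fig.canvas.draw_idle()

def on_release(event):
    if "motion" in state:
        fig.canvas.mpl_disconnect(state.pop("motion"))

fig.canvas.mpl_connect("button_press_event", on_press)
fig.canvas.mpl_connect("button_release_event", on_release)
plt.show()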
cdfassnacht/CodeCDF | python/make_finding_chart.py | 1 | 2229 | import wcs,scipy,pyfits,pylab,sys
from scipy import ndimage
def postage_stamp(input,output,locations,xsize,ysize,scale,angle):
pylab.close()
# f = open(locations).readlines()[0].split()
# ra = wcs.ra2deg(f[3]+":"+f[4]+":"+f[5])
# dec = wcs.dec2deg(f[6]+":"+f[7]+":"+f[8])
#
# if wcs.is_degree(ra)==False:
# ra = wcs.ra2deg(ra)
# if wcs.is_degree(dec)==False:
# dec = wcs.dec2deg(dec)
ra = 317.72512
dec = 21.516883
outheader = wcs.make_header(ra,dec,xsize,ysize,scale)
outheader = wcs.rotate_header(outheader,angle)
coords = scipy.indices((ysize,xsize)).astype(scipy.float32)
skycoords = wcs.pix2sky(outheader,coords[1],coords[0])
file = pyfits.open(input)
inheader = file[0].header.copy()
data = file[0].data.copy()
ccdcoords = wcs.sky2pix(inheader,skycoords[0],skycoords[1])
coords[1] = ccdcoords[0]
coords[0] = ccdcoords[1]
image = ndimage.map_coordinates(data,coords,output=scipy.float64)
bounds = scipy.sort(image.flatten())
vmin = bounds[int(bounds.size*0.65)]
vmax = bounds[int(bounds.size*0.995)]
pylab.imshow(image[::-1],cmap=pylab.cm.gist_yarg,vmin=vmin,vmax=vmax)
pylab.axis('off')
title = r"B2108 Finding Chart"
pylab.text(420,320,r"Star TO Target offsets:")
pylab.text(440,370,r"11.91$^{\prime\prime}$ S, 7.20$^{\prime\prime}$ W")
pylab.text(440,420,r"Slit PA 60 degrees E from N")
pylab.title(title)
length = 10./scale
dx = length
dy = length
pylab.arrow(300,300,-1.*dx,0)
pylab.arrow(300,300,0,-1*dx)
pylab.rc('text',usetex=True)
pylab.text(310,290-dy,'N')
pylab.text(310.,290-dy/2.,r'10$^{\prime\prime}$')
ax = pylab.gca()
ax.figure.set_size_inches((7.5,7.5))
import matplotlib as mpl
a = 0
for i in locations:
ra = i[0]
dec = i[1]
x,y = wcs.sky2pix(outheader,ra,dec)
y -= ysize/2.
y *= -1
y += ysize/2.
if a==0:
a = 1
pylab.text(x+30,y-30,"Target")
else:
pylab.text(x+30,y-30,"Offset Star")
ax.patches.append(pylab.Circle((x,y),25,transform=ax.transData,fill=False,ec='r',lw=1.5))
pylab.savefig(output+".eps")
import os
os.system('/usr/bin/convert %s.eps %s.png' % (output,output))
inname = sys.argv[1]
outname = sys.argv[2]
loc = [[317.72512,21.516883],[317.72868,21.518882]]
postage_stamp(inname,outname,loc,80/0.05,80/0.05,0.05,0.)
| mit |
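# --- Editor's sketch (standalone, not part of the files above or below) ---
# The finding-chart code above sets the display limits by sorting all pixel
# values and taking roughly the 65th and 99.5th percentiles as vmin/vmax.
# The same idea expressed with np.percentile on synthetic data:
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
image = rng.normal(loc=10.0, scale=2.0, size=(200, 200))
vmin, vmax = np.percentile(image, [65.0, 99.5])
plt.imshow(image, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
plt.axis('off')
plt.show()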
niltonlk/nest-simulator | pynest/examples/brunel_delta_nest.py | 8 | 11657 | # -*- coding: utf-8 -*-
#
# brunel_delta_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Random balanced network (delta synapses)
----------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]_
When connecting the network, customary synapse models are used, which
allow for querying the number of created synapses. Using spike
recorders, the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of sparsely connected networks of excitatory and
inhibitory spiking neurons. Journal of Computational Neuroscience 8,
183-208.
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting.
import time
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.
startbuild = time.time()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons in the network and the number of neurons
# recorded from
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
###############################################################################
# Definition of connectivity parameters
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the integrate and fire neuron and the
# synapses. The parameters of the neuron are stored in a dictionary.
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_ex = J # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex # amplitude of inhibitory postsynaptic potential
###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.
nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
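# Editor's note (worked example using the parameter values defined above):
# CE = int(0.1 * 4 * 2500) = 1000, so
# nu_th  = 20.0 / (0.1 * 1000 * 20.0) = 0.01 spikes/ms (10 Hz per input),
# nu_ex  = 2.0 * 0.01 = 0.02 spikes/ms, and
# p_rate = 1000.0 * 0.02 * 1000 = 20000.0 Hz for the Poisson generator.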
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting ``print_time`` to `True` prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
###############################################################################
# Creation of the nodes using ``Create``. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike recorders. The spike recorders will
# later be used to record excitatory and inhibitory spikes. Properties of the
# nodes are specified via ``params``, which expects a dictionary.
nodes_ex = nest.Create("iaf_psc_delta", NE, params=neuron_params)
nodes_in = nest.Create("iaf_psc_delta", NI, params=neuron_params)
noise = nest.Create("poisson_generator", params={"rate": p_rate})
espikes = nest.Create("spike_recorder")
ispikes = nest.Create("spike_recorder")
###############################################################################
# Configuration of the spike recorders recording excitatory and inhibitory
# spikes by sending parameter dictionaries to ``set``. Setting the property
# `record_to` to *"ascii"* ensures that the spikes will be recorded to a file,
# whose name starts with the string assigned to the property `label`.
espikes.set(label="brunel-py-ex", record_to="ascii")
ispikes.set(label="brunel-py-in", record_to="ascii")
print("Connecting devices")
###############################################################################
# Definition of a synapse using ``CopyModel``, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.
nest.CopyModel("static_synapse", "excitatory",
{"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": J_in, "delay": delay})
###############################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population the default rule
# (``all_to_all``) of ``Connect`` is used. The synaptic properties are inserted
# via ``syn_spec`` which expects a dictionary when defining multiple variables
# or a string when simply using a pre-defined synapse.
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
###############################################################################
# Connecting the first ``N_rec`` nodes of the excitatory and inhibitory
# population to the associated spike recorders using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameter are defined in a
# dictionary. Here we use the connection rule ``fixed_indegree``,
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameters as well as the synapse
# parameters are defined analogously to the connection from the excitatory
# population defined above.
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
###############################################################################
# Storage of the time point after the buildup of the network in a variable.
endbuild = time.time()
###############################################################################
# Simulation of the network.
print("Simulating")
nest.Simulate(simtime)
###############################################################################
# Storage of the time point after the simulation of the network in a variable.
endsimulate = time.time()
###############################################################################
# Reading out the total number of spikes received from the spike recorder
# connected to the excitatory population and the inhibitory population.
events_ex = espikes.n_events
events_in = ispikes.n_events
###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
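# Editor's note (hypothetical numbers): if the 50 recorded excitatory neurons
# emitted 2000 spikes during the 1000 ms simulation, the estimate would be
# 2000 / 1000.0 * 1000.0 / 50 = 40 Hz per neuron.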
###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
nest.GetDefaults("inhibitory")["num_connections"])
###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.
print("Brunel network simulation (Python)")
print(f"Number of neurons : {N_neurons}")
print(f"Number of synapses: {num_synapses}")
print(f" Exitatory : {int(CE * N_neurons) + N_neurons}")
print(f" Inhibitory : {int(CI * N_neurons)}")
print(f"Excitatory rate : {rate_ex:.2f} Hz")
print(f"Inhibitory rate : {rate_in:.2f} Hz")
print(f"Building time : {build_time:.2f} s")
print(f"Simulation time : {sim_time:.2f} s")
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
nest.raster_plot.from_device(espikes, hist=True)
plt.show()
| gpl-2.0 |
cauchycui/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
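# --- Editor's sketch (standalone, not part of the files above or below) ---
# A stripped-down numpy.distutils configuration of the same shape as the
# scikit-learn svm setup above: one helper library plus one extension that
# links against it. The package, source paths and names here are hypothetical.
import numpy

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('mypkg', parent_package, top_path)
    # helper C library compiled once and reused by the extension below
    config.add_library('helper-lib', sources=['src/helper.c'])
    config.add_extension('fastmod',
                         sources=['fastmod.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=['helper-lib'])
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())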
toastedcornflakes/scikit-learn | examples/linear_model/plot_ols.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
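# --- Editor's sketch (standalone, not part of the files above or below) ---
# The fitted line above can also be obtained in closed form from the normal
# equations, beta = (X^T X)^{-1} X^T y, which is what ordinary least squares
# solves. A numpy-only check on toy data with a known slope and intercept:
import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(50, 1)
y = 3.0 * x[:, 0] + 1.0 + 0.1 * rng.randn(50)

X = np.hstack([np.ones((50, 1)), x])          # add an intercept column
beta = np.linalg.lstsq(X, y, rcond=None)[0]   # [intercept, slope]
print(beta)                                   # approximately [1.0, 3.0]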
rmcgibbo/msmbuilder | msmbuilder/commands/implied_timescales.py | 12 | 5214 | # Author: Robert McGibbon <[email protected]>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
"""Scan the implied timescales of MarkovStateModels with respect to lag time.
This command will build a series of MarkovStateModels at different lag times,
and save a file to disk containing the relaxation timescales of each of the
models.
A plot of these data can then be used to choose the lag time [1].
References
----------
.. [1] Beauchamp, Kyle A., et al. "MSMBuilder2: modeling conformational
dynamics on the picosecond to millisecond scale." J. Chem. Theory.
Comput. 7.10 (2011): 3412-3419.
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
from os.path import splitext
import sys
import json
import pandas as pd
from ..dataset import dataset
from ..cmdline import Command, argument, argument_group, rangetype, FlagAction
from ..msm import MarkovStateModel, implied_timescales
class ImpliedTimescales(Command):
_group = 'MSM'
_concrete = True
description = __doc__
lag_times = argument('-l', '--lag_times', default='1:10', help='''Range
of lag times. Specify as 'start:stop' or 'start:stop:step. The
endpoints are inclusive.''', type=rangetype)
inp = argument(
'-i', '--inp', help='''Path to input dataset, a collection of 1D
integer sequences (such as the output from clustering)''',
required=True)
out = argument('--out', help='''Output file''',
default='timescales.csv')
fmt = argument('--fmt', help='Output file format', default='csv',
choices=('csv', 'json', 'excel'))
_extensions = {'csv': '.csv', 'json': '.json', 'excel': '.xlsx'}
n_jobs = argument('--n_jobs', help='Number of parallel processes',
default=1, type=int)
p = argument_group('MSM parameters')
n_timescales = p.add_argument('--n_timescales', default=10, help='''
The number of dynamical timescales to calculate when diagonalizing
the transition matrix.''', type=int)
reversible_type = p.add_argument('--reversible_type', help='''
Method by which the reversibility of the transition matrix
is enforced. 'mle' uses a maximum likelihood method that is
solved by numerical optimization, and 'transpose'
uses a more restrictive (but less computationally complex)
direct symmetrization of the expected number of counts.''',
choices=('mle', 'transpose'), default='mle')
ergodic_cutoff = p.add_argument('--ergodic_cutoff', default=1, help='''
Only the maximal strongly ergodic subgraph of the data is used to build
an MSM. Ergodicity is determined by ensuring that each state is
accessible from each other state via one or more paths involving edges
with a number of observed directed counts greater than or equal to
``ergodic_cutoff``. Note that by setting ``ergodic_cutoff`` to 0, this
trimming is effectively turned off.''', type=int)
prior_counts = p.add_argument('--prior_counts', help='''Add a number
of "pseudo counts" to each entry in the counts matrix. When
prior_counts == 0 (default), the assigned transition probability
between two states with no observed transitions will be zero, whereas
when prior_counts > 0, even this unobserved transitions will be
given nonzero probability.''', type=float, default=0)
verbose = p.add_argument('--verbose', default=True,
help='Enable verbose printout', action=FlagAction)
def __init__(self, args):
self.args = args
def start(self):
kwargs = {
'n_timescales': self.args.n_timescales,
'reversible_type': self.args.reversible_type,
'ergodic_cutoff': self.args.ergodic_cutoff,
'prior_counts': self.args.prior_counts,
'verbose': self.args.verbose,
}
with dataset(self.args.inp, mode='r') as ds:
model = MarkovStateModel(**kwargs)
lines = implied_timescales(
ds, lag_times=self.args.lag_times,
n_timescales=self.args.n_timescales,
msm=model,
n_jobs=self.args.n_jobs,
verbose=self.args.verbose)
cols = ['Timescale %d' % (d+1) for d in range(len(lines[0]))]
df = pd.DataFrame(data=lines, columns=cols)
df['Lag Time'] = self.args.lag_times
df = df.reindex_axis(sorted(df.columns), axis=1)
self.write_output(df)
def write_output(self, df):
outfile = splitext(self.args.out)[0] + self._extensions[self.args.fmt]
print('Writing %s' % outfile)
if self.args.fmt == 'csv':
df.to_csv(outfile)
elif self.args.fmt == 'json':
with open(outfile, 'w') as f:
json.dump(df.to_dict(orient='records'), f)
elif self.args.fmt == 'excel':
df.to_excel(outfile)
else:
raise RuntimeError('unknown fmt: %s' % self.args.fmt)
print('All done!')
| lgpl-2.1 |
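# --- Editor's sketch (standalone, not part of the files above or below) ---
# With the default --out/--fmt settings the command above writes
# 'timescales.csv' with one row per lag time and columns 'Lag Time',
# 'Timescale 1', ..., 'Timescale N'. A typical follow-up is to plot the
# timescales against lag time from that CSV (purely illustrative):
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('timescales.csv', index_col=0)
for col in [c for c in df.columns if c.startswith('Timescale')]:
    plt.plot(df['Lag Time'], df[col], marker='o', label=col)
plt.xlabel('Lag time')
plt.ylabel('Implied timescale')
plt.legend()
plt.show()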
gon1213/SDC | behavioral_cloning/CarND-Transfer-Learning-lab/shrink.py | 7 | 1354 | import pickle
import tensorflow as tf
from collections import Counter
from sklearn.utils import shuffle
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
flags.DEFINE_string('training_file', '', "Bottleneck features training file (.p)")
flags.DEFINE_string('output_file', '', "Name of the output file with reduced number of examples.")
flags.DEFINE_integer('size', 100, 'Number of examples per class to keep')
def main(_):
# load bottleneck data
with open(FLAGS.training_file, 'rb') as f:
train_data = pickle.load(f)
X_train = train_data['features']
y_train = train_data['labels']
print(X_train.shape, y_train.shape)
X_train, y_train = shuffle(X_train, y_train, random_state=0)
keep_indices = []
keep_counter = Counter()
for i, label in enumerate(y_train.reshape(-1)):
if keep_counter[label] < FLAGS.size:
keep_counter[label] += 1
keep_indices.append(i)
X_train_small = X_train[keep_indices]
y_train_small = y_train[keep_indices]
print(X_train_small.shape, y_train_small.shape)
print("Writing to {}".format(FLAGS.output_file))
data = {'features': X_train_small, 'labels': y_train_small}
pickle.dump(data, open(FLAGS.output_file, 'wb'))
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
| gpl-3.0 |
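# --- Editor's sketch (standalone, not part of the files above or below) ---
# The core of shrink.py is the per-class cap: after shuffling, keep at most
# `size` examples of each label. The same logic on toy arrays, without the
# TensorFlow flags or pickle I/O:
import numpy as np
from collections import Counter
from sklearn.utils import shuffle

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
X, y = shuffle(X, y, random_state=0)

size = 2
keep_counter = Counter()
keep_indices = []
for i, label in enumerate(y.reshape(-1)):
    if keep_counter[label] < size:
        keep_counter[label] += 1
        keep_indices.append(i)

print(X[keep_indices].shape, np.bincount(y[keep_indices]))  # (6, 2) [2 2 2]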
ZENGXH/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
RecipeML/Recipe | recipe/preprocessors/gaussianR.py | 1 | 1211 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.random_projection import GaussianRandomProjection
def gaussianR(args):
"""Uses scikit-learn's GaussianRandomProjection to reduce dimensionality through Gaussian random projection.
Parameters
----------
n_components: int
The number of components to keep
eps : strictly positive float
Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to ‘auto’.
"""
epsilon = float(args[1])
n_comp = int(args[2])
return GaussianRandomProjection(n_components=n_comp, eps=epsilon, random_state=42)
| gpl-3.0 |
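# --- Editor's sketch (standalone, not part of the files above or below) ---
# Minimal usage of the transformer returned by gaussianR above: project
# high-dimensional data down to n_components with a Gaussian random matrix
# (eps only matters when n_components='auto', but passing it is harmless).
import numpy as np
from sklearn.random_projection import GaussianRandomProjection

X = np.random.RandomState(42).rand(100, 500)
grp = GaussianRandomProjection(n_components=20, eps=0.5, random_state=42)
X_small = grp.fit_transform(X)
print(X_small.shape)  # (100, 20)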
Supermem/ibis | ibis/config.py | 16 | 20779 | # This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
PY3 = (sys.version_info[0] >= 3)
if PY3:
def u(s):
return s
else:
def u(s):
return unicode(s, "unicode_escape")
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
# must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
buf = StringIO()
pprint.pprint(self.d, stream=buf)
return buf.getvalue()
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
# and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To do this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
'Need to invoke as'
'option_context(pat, val, [(pat, val), ...)).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
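# Editor's note — a minimal usage sketch for register_option and the
# get/set/reset functions above (shown as a comment so module import
# behaviour is unchanged; the option names are hypothetical):
#
#   import ibis.config as cf
#   cf.register_option('display.max_rows', 100, 'Rows shown in repr',
#                      validator=cf.is_int)
#   cf.get_option('display.max_rows')        # -> 100
#   cf.set_option('display.max_rows', 25)
#   cf.reset_option('display.max_rows')      # back to the default of 100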
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that of `rkey` is checked. If they
do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
If key is deprecated and a replacement key is defined, returns the
replacement key; otherwise returns `key` as-is.
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set)_option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x, which returns
True if type(x) is equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x, which returns
True if x is an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError("Value must be one of %s"
% str("|".join(pp_values)))
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
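# Editor's note — how these validator factories and pre-built validators are
# typically combined with ``register_option`` (comment-only sketch; the
# option names and legal values are hypothetical):
#
#   register_option('display.precision', 6, validator=is_int)
#   register_option('sql.engine', 'impala',
#                   validator=is_one_of_factory(['impala', 'sqlite']))
#   set_option('display.precision', 'high')   # raises ValueError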
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/openpyxl-2.6.4/openpyxl/styles/builtins.py | 2 | 31221 | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
# Builtins styles as defined in Part 4 Annex G.2
from .named_styles import NamedStyle
from openpyxl.xml.functions import fromstring
normal = """
<namedStyle builtinId="0" name="Normal">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
comma = """
<namedStyle builtinId="3" name="Comma">
<alignment/>
<number_format>_-* #,##0.00\\ _$_-;\\-* #,##0.00\\ _$_-;_-* "-"??\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
comma_0 = """
<namedStyle builtinId="6" name="Comma [0]">
<alignment/>
<number_format>_-* #,##0\\ _$_-;\\-* #,##0\\ _$_-;_-* "-"\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
currency = """
<namedStyle builtinId="4" name="Currency">
<alignment/>
<number_format>_-* #,##0.00\\ "$"_-;\\-* #,##0.00\\ "$"_-;_-* "-"??\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
currency_0 = """
<namedStyle builtinId="7" name="Currency [0]">
<alignment/>
<number_format>_-* #,##0\\ "$"_-;\\-* #,##0\\ "$"_-;_-* "-"\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
percent = """
<namedStyle builtinId="5" name="Percent">
<alignment/>
<number_format>0%</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
hyperlink = """
<namedStyle builtinId="8" name="Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="10"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
followed_hyperlink = """
<namedStyle builtinId="9" name="Followed Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="11"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
title = """
<namedStyle builtinId="15" name="Title">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Cambria"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="18"/>
<scheme val="major"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_1 = """
<namedStyle builtinId="16" name="Headline 1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="15"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_2 = """
<namedStyle builtinId="17" name="Headline 2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4" tint="0.5"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="13"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_3 = """
<namedStyle builtinId="18" name="Headline 3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="medium">
<color theme="4" tint="0.4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_4 = """
<namedStyle builtinId="19" name="Headline 4">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
good = """
<namedStyle builtinId="26" name="Good" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFC6EFCE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF006100"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
bad = """
<namedStyle builtinId="27" name="Bad" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFC7CE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C0006"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
neutral = """
<namedStyle builtinId="28" name="Neutral" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFEB9C"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C6500"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
input = """
<namedStyle builtinId="20" name="Input" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFCC99"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF3F3F76"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
output = """
<namedStyle builtinId="21" name="Output" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF3F3F3F"/>
</left>
<right style="thin">
<color rgb="FF3F3F3F"/>
</right>
<top style="thin">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="thin">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FF3F3F3F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
calculation = """
<namedStyle builtinId="22" name="Calculation" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
linked_cell = """
<namedStyle builtinId="24" name="Linked Cell" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="double">
<color rgb="FFFF8001"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
check_cell = """
<namedStyle builtinId="23" name="Check Cell" >
<alignment/>
<border>
<left style="double">
<color rgb="FF3F3F3F"/>
</left>
<right style="double">
<color rgb="FF3F3F3F"/>
</right>
<top style="double">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="double">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFA5A5A5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
warning = """
<namedStyle builtinId="11" name="Warning Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFF0000"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
note = """
<namedStyle builtinId="10" name="Note" >
<alignment/>
<border>
<left style="thin">
<color rgb="FFB2B2B2"/>
</left>
<right style="thin">
<color rgb="FFB2B2B2"/>
</right>
<top style="thin">
<color rgb="FFB2B2B2"/>
</top>
<bottom style="thin">
<color rgb="FFB2B2B2"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFFFCC"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
explanatory = """
<namedStyle builtinId="53" name="Explanatory Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<i val="1"/>
<color rgb="FF7F7F7F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
total = """
<namedStyle builtinId="25" name="Total" >
<alignment/>
<border>
<left/>
<right/>
<top style="thin">
<color theme="4"/>
</top>
<bottom style="double">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1 = """
<namedStyle builtinId="29" name="Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_20 = """
<namedStyle builtinId="30" name="20 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_40 = """
<namedStyle builtinId="31" name="40 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_60 = """
<namedStyle builtinId="32" name="60 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_2 = """<namedStyle builtinId="33" name="Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_20 = """
<namedStyle builtinId="34" name="20 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_40 = """
<namedStyle builtinId="35" name="40 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_60 = """
<namedStyle builtinId="36" name="60 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3 = """
<namedStyle builtinId="37" name="Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3_20 = """
<namedStyle builtinId="38" name="20 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3_40 = """
<namedStyle builtinId="39" name="40 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_3_60 = """
<namedStyle builtinId="40" name="60 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4 = """
<namedStyle builtinId="41" name="Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_20 = """
<namedStyle builtinId="42" name="20 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_40 = """
<namedStyle builtinId="43" name="40 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_60 = """
<namedStyle builtinId="44" name="60 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5 = """
<namedStyle builtinId="45" name="Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_20 = """
<namedStyle builtinId="46" name="20 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_40 = """
<namedStyle builtinId="47" name="40 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_60 = """
<namedStyle builtinId="48" name="60 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6 = """
<namedStyle builtinId="49" name="Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_20 = """
<namedStyle builtinId="50" name="20 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_40 = """
<namedStyle builtinId="51" name="40 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_60 = """
<namedStyle builtinId="52" name="60 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
pandas_highlight = """
<namedStyle hidden="0" name="Pandas">
<alignment horizontal="center"/>
<border>
<left style="thin"><color rgb="00000000"/></left>
<right style="thin"><color rgb="00000000"/></right>
<top style="thin"><color rgb="00000000"/></top>
<bottom style="thin"><color rgb="00000000"/></bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<b val="1"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
styles = dict(
[
('Normal', NamedStyle.from_tree(fromstring(normal))),
('Comma', NamedStyle.from_tree(fromstring(comma))),
('Currency', NamedStyle.from_tree(fromstring(currency))),
('Percent', NamedStyle.from_tree(fromstring(percent))),
('Comma [0]', NamedStyle.from_tree(fromstring(comma_0))),
('Currency [0]', NamedStyle.from_tree(fromstring(currency_0))),
('Hyperlink', NamedStyle.from_tree(fromstring(hyperlink))),
('Followed Hyperlink', NamedStyle.from_tree(fromstring(followed_hyperlink))),
('Note', NamedStyle.from_tree(fromstring(note))),
('Warning Text', NamedStyle.from_tree(fromstring(warning))),
('Title', NamedStyle.from_tree(fromstring(title))),
('Headline 1', NamedStyle.from_tree(fromstring(headline_1))),
('Headline 2', NamedStyle.from_tree(fromstring(headline_2))),
('Headline 3', NamedStyle.from_tree(fromstring(headline_3))),
('Headline 4', NamedStyle.from_tree(fromstring(headline_4))),
('Input', NamedStyle.from_tree(fromstring(input))),
('Output', NamedStyle.from_tree(fromstring(output))),
('Calculation',NamedStyle.from_tree(fromstring(calculation))),
('Check Cell', NamedStyle.from_tree(fromstring(check_cell))),
('Linked Cell', NamedStyle.from_tree(fromstring(linked_cell))),
('Total', NamedStyle.from_tree(fromstring(total))),
('Good', NamedStyle.from_tree(fromstring(good))),
('Bad', NamedStyle.from_tree(fromstring(bad))),
('Neutral', NamedStyle.from_tree(fromstring(neutral))),
('Accent1', NamedStyle.from_tree(fromstring(accent_1))),
('20 % - Accent1', NamedStyle.from_tree(fromstring(accent_1_20))),
('40 % - Accent1', NamedStyle.from_tree(fromstring(accent_1_40))),
('60 % - Accent1', NamedStyle.from_tree(fromstring(accent_1_60))),
('Accent2', NamedStyle.from_tree(fromstring(accent_2))),
('20 % - Accent2', NamedStyle.from_tree(fromstring(accent_2_20))),
('40 % - Accent2', NamedStyle.from_tree(fromstring(accent_2_40))),
('60 % - Accent2', NamedStyle.from_tree(fromstring(accent_2_60))),
('Accent3', NamedStyle.from_tree(fromstring(accent_3))),
('20 % - Accent3', NamedStyle.from_tree(fromstring(accent_3_20))),
('40 % - Accent3', NamedStyle.from_tree(fromstring(accent_3_40))),
('60 % - Accent3', NamedStyle.from_tree(fromstring(accent_3_60))),
('Accent4', NamedStyle.from_tree(fromstring(accent_4))),
('20 % - Accent4', NamedStyle.from_tree(fromstring(accent_4_20))),
('40 % - Accent4', NamedStyle.from_tree(fromstring(accent_4_40))),
('60 % - Accent4', NamedStyle.from_tree(fromstring(accent_4_60))),
('Accent5', NamedStyle.from_tree(fromstring(accent_5))),
('20 % - Accent5', NamedStyle.from_tree(fromstring(accent_5_20))),
('40 % - Accent5', NamedStyle.from_tree(fromstring(accent_5_40))),
('60 % - Accent5', NamedStyle.from_tree(fromstring(accent_5_60))),
('Accent6', NamedStyle.from_tree(fromstring(accent_6))),
('20 % - Accent6', NamedStyle.from_tree(fromstring(accent_6_20))),
('40 % - Accent6', NamedStyle.from_tree(fromstring(accent_6_40))),
('60 % - Accent6', NamedStyle.from_tree(fromstring(accent_6_60))),
('Explanatory Text', NamedStyle.from_tree(fromstring(explanatory))),
('Pandas', NamedStyle.from_tree(fromstring(pandas_highlight)))
]
)
| apache-2.0 |
IshankGulati/scikit-learn | benchmarks/bench_lof.py | 49 | 3548 | """
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
np.random.seed(2)
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['shuttle']
novelty_detection = True # if False, training set polluted by outliers
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, shuffle=True,
percent10=False)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
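        # novelty detection: fit on inliers only (label 0); outliers are left in the test set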
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = -model.decision_function(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs,'
'test-time: %0.2fs)' % (dataset_name, AUC, fit_time,
predict_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
admed/molgears | widgets/read_result.py | 1 | 56114 | # -*- coding: utf-8 -*-
import xlrd
from datetime import datetime
from numpy import array
from math import log10
def read_fullplate(filepath, concs):
from math import fabs
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
if date_cell =='':
date_cell = worksheet.cell_value(29, 1)
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
if temp == '':
temp = worksheet.cell_value(31, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(8)] # data matrix
    bg_matrix = [[0 for x in range(10)] for y in range(4)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
i=0
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 1:
I.append(float(cell_value))
elif curr_cell == 12:
XII.append(float(cell_value))
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
I_avg = round(sum(I)/float(len(I)), 4)
    avg_col = array(matrix[4:]).T.mean(axis=1).tolist() # list of mean control values
XII_avg = round(sum(XII[0:6])/float(len(XII[0:6])), 4)
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
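    # scale the control-column means to a 0-100 range: the column I mean maps to 100 and the
    # column XII mean maps to 0, so 'normal' is a percentage between the two plate references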
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
for el in avg_col:
if fabs(el-f_avg)<11:
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
x=0
for row in matrix[0:4]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
ABCD =[]
for row in matrix[:4]:
list =[]
for el in range(len(row)):
list.append(round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
BG_ABCD = []
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
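    # returned values: log10 concentrations (raw and -BG), the raw and -BG percentage matrices,
    # the control curve, measurement date, plate temperature, the fluorescence counter, the
    # column I/XII means and the per-column f_list flags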
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
# +++++++++++++++++++++++++++++ read top of plate ++++++++++++++++++++++++++++++++
####################################################
def read_topplate(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
# print "dir(worksheet)", dir(worksheet)
# print "worksheet.sheet_selected", worksheet.name
date_cell = worksheet.cell_value(28, 1)
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(8)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
i=0
assert worksheet.cell_value(33, 0) == 'A'
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
# print i, j, curr_cell, curr_row, worksheet.cell_value(curr_row, curr_cell), '\n'
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
XII.append(cell_value)
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:4]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:6])/float(len(XII[0:6])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
nums = array(avg_col[5:])
for el in avg_col:
# if fabs(el-f_avg)<2.5*nums.std():
if fabs(el-f_avg)<11:
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
def read_bottomplate(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(8)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
i=0
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
# print cell_value
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
XII.append(cell_value)
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
# print I
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[-1:]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:6])/float(len(XII[0:6])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
# print "f_avg", f_avg
from math import fabs
for el in avg_col:
# print "el %s" % avg_col.index(el), el
if fabs(el-f_avg)<11:
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[4:-1]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[4:-1]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append(round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
# +++++++++++++++++++++++++++++ read half of plate ++++++++++++++++++++++++++++++++
####################################################
def read_halfplate_top_peptide(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
print 'date_cell', date_cell
assert worksheet.cell_value(33, 0) == 'A'
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(4)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
i=0
while curr_row <= 36:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
XII.append(cell_value)
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:4]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:3])/float(len(XII[0:3])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
nums = array(avg_col[5:])
for el in avg_col:
if fabs(el-f_avg)<2.5*nums.std():
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
def read_halfplate_top_protein(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
print 'date_cell', date_cell
assert worksheet.cell_value(33, 0) == 'A'
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(4)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
    # column XII
while curr_row <= 40:
cell_value = worksheet.cell_value(curr_row, 12)
XII.append(cell_value)
curr_row+=1
curr_row = 33
i=0
while curr_row <= 36:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
pass
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:4]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:6])/float(len(XII[0:6])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
nums = array(avg_col[5:])
for el in avg_col:
if fabs(el-f_avg)<2.5*nums.std():
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
def read_halfplate_bottom_peptide(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(4)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 34
i=0
while curr_row <= 37:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
XII.append(cell_value)
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:4]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:2])/float(len(XII[0:2])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
nums = array(avg_col[5:])
for el in avg_col:
if fabs(el-f_avg)<2.5*nums.std():
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
def read_halfplate_bottom_protein(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
print 'date_cell', date_cell
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(4)] # data matrix, columns II-XI
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
    # column XII
curr_row = 33
i=0
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 12:
XII.append(cell_value)
j+=1
curr_cell+=1
curr_row+=1
i+=1
    # the remaining matrix
curr_row = 37
i=0
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
print 'row', curr_row, 'cel', curr_cell
cell_value = worksheet.cell_value(curr_row, curr_cell)
print 'val', cell_value
if curr_cell == 1:
I.append(cell_value)
elif curr_cell == 12:
pass
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
print 'matrix', matrix
print 'I', I
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:4]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:6])/float(len(XII[0:6])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
nums = array(avg_col[5:])
for el in avg_col:
if fabs(el-f_avg)<2.5*nums.std():
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
    x=0 # compute the data for the - BG table (the blue one in the Excel sheet)
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
    # compute log10 of the concentration list (column C)
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
    ABCD =[] # matrix built from columns a, b, c, d
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
    BG_ABCD = [] # -BG matrix built from columns a, b, c, d
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append( round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
# +++++++++++++++read palte layaut 3+5++++++++++++++++++++++++++
def read_3_5_plate(filepath, concs):
workbook = xlrd.open_workbook(filepath)
worksheet = workbook.sheet_by_index(0)
date_cell = worksheet.cell_value(28, 1)
if date_cell =='':
date_cell = worksheet.cell_value(29, 1)
measure_date = datetime.strptime(str(date_cell), '%Y-%m-%d %H:%M:%S')
temp = worksheet.cell_value(30, 1)
if temp == '':
temp = worksheet.cell_value(31, 1)
temp = float(temp.split()[1])
    matrix = [[0 for x in range(10)] for y in range(8)] # data matrix
    bg_matrix = [[0 for x in range(10)] for y in range(3)] # average data - blue cells - BG
    I = [] # first column
    XII = [] # last column
    f_list = [] # fluorescence flags - list
fluorescence = 0
curr_row = 33
i=0
while curr_row <= 40:
curr_cell = 1
j=0
while curr_cell <= 12:
cell_value = worksheet.cell_value(curr_row, curr_cell)
if curr_cell == 1:
I.append(float(cell_value))
elif curr_cell == 12:
XII.append(float(cell_value))
else:
matrix[i][j]=cell_value
j+=1
curr_cell+=1
curr_row+=1
i+=1
    I_avg=round(sum(I)/float(len(I)), 4) # mean of the values in column 1
    avg_col = array(matrix[3:]).T.mean(axis=1).tolist() # list of mean control values
    XII_avg=round(sum(XII[0:6])/float(len(XII[0:6])), 4) # mean of the values in column 12
normal =[100*(el-XII_avg)/(I_avg-XII_avg) for el in avg_col]
    f_avg = 0.0 # average number for counting fluorescence
for el in avg_col[5:]:
f_avg += el/len(avg_col[5:])
from math import fabs
for el in avg_col:
if fabs(el-f_avg)<11:
f_list.append(1)
fluorescence+=1
else:
f_list.append(0)
x=0
for row in matrix[0:3]:
y=0
for i in row:
bg_matrix[x][y]=round(i-avg_col[y]+XII_avg, 4)
y+=1
x+=1
C =[]
C_BG =[]
Ycontrol=[[]]*2
for con in concs:
Ycontrol[0].append(round(log10(con), 4))
Ycontrol[1]=normal
for el in range(len(concs)):
C.append(round(log10(concs[int(el)]), 4))
for el in range(len(concs)):
C_BG.append(round(log10(concs[int(el)]), 4))
ABCD =[]
for row in matrix[:3]:
list =[]
for el in range(len(row)):
list.append(round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
ABCD.append(list)
BG_ABCD = []
for row in bg_matrix:
list =[]
for el in range(len(row)):
list.append(round(100*(row[int(el)]-XII_avg)/(I_avg-XII_avg), 4))
BG_ABCD.append(list)
return(C, C_BG, ABCD, BG_ABCD, Ycontrol, measure_date, temp, fluorescence, I_avg, XII_avg, f_list)
## +++++++++++++++ Save Data ++++++++++++++++++++++++++
# X - matrix X
# X_BG - matrix X -BG
# ABCD - matrix of RAW results
# ABCD_BG - matrix of RAW results - BG
# filepath - path to the output PNG image file
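# Minimal usage sketch (hypothetical file name and scope; 'test' is any object exposing the
# assay constants lb, lt, rt and kd used for the Ki conversion):
#   C, C_BG, ABCD, BG_ABCD, Ycontrol, date, temp, fl, I_avg, XII_avg, f_list = \
#       read_fullplate('plate.xls', concs)
#   fit, fit_bg, quality = save_fix_data(C, C_BG, ABCD, BG_ABCD, Ycontrol, test, 'ic50.png',
#                                        range(1, len(C)+1), range(1, len(C_BG)+1), f_list, True)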
def save_fix_data(CX, CX_BG, Y_ABCD, Y_ABCD_BG, Ycontrol, test, filepath, scope, scope_BG, f_list, raw, dpi=None):
from pylab import plot, title, xlabel, ylabel, legend, axes, text, savefig, grid, clf, gca, axvline, axhline, setp, errorbar, close
from scipy import optimize, linspace
from math import fabs
from numpy import mean
#create thumb file name:
import os
head, tail = os.path.split(filepath)
thumbpath = os.path.join(head, "thumb_"+tail)
props = dict(boxstyle='square', facecolor='white')
X = []
X_BG = []
ABCD = []
ABCD_BG = []
for el in scope:
X.append(CX[int(el)-1])
for el in scope_BG:
X_BG.append(CX_BG[int(el)-1])
for row in Y_ABCD:
list =[]
for el in scope:
list.append(row[int(el)-1])
ABCD.append(list)
for row in Y_ABCD_BG:
list =[]
for el in scope_BG:
list.append(row[int(el)-1])
ABCD_BG.append(list)
x = X
x_bg = X_BG
nums = array(ABCD).T
nums_BG = array(ABCD_BG).T
ALL_nums = array(Y_ABCD).T
ALL_nums_BG = array(Y_ABCD_BG).T
# STDEV
Yerr = nums.std(axis=1)
Y_BGerr = nums_BG.std(axis=1)
# Means from array
y = nums.mean(axis=1).tolist()
y_bg = nums_BG.mean(axis=1).tolist()
ALL_y = ALL_nums.mean(axis=1).tolist()
ALL_y_bg = ALL_nums_BG.mean(axis=1).tolist()
hi = int(round(max(x)+2, 0))
lo = int(round(min(x), 0))-1
p0_gess = CX[int((len(CX)/2)-1)]
p0 = [p0_gess,-1.]
Top = 100
Bottom = 0
out_of_scale = []
out_of_scale_BG = []
sections = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y:
if el > 120.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(6, []).append(y.index(el))
elif el > 90.0:
sections.setdefault(6, []).append(y.index(el))
elif el <= 90.0 and el > 75.0:
sections.setdefault(5, []).append(y.index(el))
elif el <= 75.0 and el > 50.0:
sections.setdefault(4, []).append(y.index(el))
elif el <= 50.0 and el > 25.0:
sections.setdefault(3, []).append(y.index(el))
elif el <= 25.0 and el > 10.0:
sections.setdefault(2, []).append(y.index(el))
elif el <= 10.0 and el >= -20.0:
sections.setdefault(1, []).append(y.index(el))
elif el < -20.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(1, []).append(y.index(el))
sections_BG = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y_bg:
if el > 120.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el > 90.0:
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el <= 90.0 and el > 75.0:
sections_BG.setdefault(5, []).append(y_bg.index(el))
elif el <= 75.0 and el > 50.0:
sections_BG.setdefault(4, []).append(y_bg.index(el))
elif el <= 50.0 and el > 25.0:
sections_BG.setdefault(3, []).append(y_bg.index(el))
elif el <= 25.0 and el > 10.0:
sections_BG.setdefault(2, []).append(y_bg.index(el))
elif el <= 10.0 and el >= -20.0:
sections_BG.setdefault(1, []).append(y_bg.index(el))
elif el < -20.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(1, []).append(y_bg.index(el))
for el in y_bg:
if el > 120.0 or el < -20.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
keys = []
fluorescence = 0
if raw:
selected_section = sections
else:
selected_section = sections_BG
for k, v in selected_section.iteritems():
if len(v)>0:
keys.append(k)
k_min = min(keys)
k_max = max(keys)
fluorescence += k_max-k_min
for k, v in selected_section.iteritems():
if len(v)>0:
test_me = True
for vx in v:
if f_list[vx] == 0:
test_me = False
if test_me:
fluorescence+=1
else:
if k<k_max and k>k_min:
fluorescence+=1
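    # recompute the 'fluorescence' score from the occupied bins: the spread between the lowest
    # and highest occupied bin, plus one for every occupied bin whose wells all passed the
    # f_list check (interior bins are counted even when the check fails)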
fitfunc = lambda p,X : Bottom + (Top-Bottom)/ (1+10**((p[0]-X)*p[1]))
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
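    # logistic (Hill-type) dose-response model with the asymptotes fixed at Top=100 and
    # Bottom=0; p[0] is the fitted logIC50, p[1] the Hill slope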
p1,cov,infodict,mesg,ier = optimize.leastsq(errfunc, p0[:], args=(x, y), full_output=True)
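    # goodness of fit: R squared computed from the leastsq residuals ('fvec') against the
    # total sum of squares of the measured points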
ss_err=(infodict['fvec']**2).sum()
ss_tot=((y-mean(y))**2).sum()
Rsquared=1-(ss_err/ss_tot)
p1_bg ,cov_bg,infodict_bg,mesg_bg,ier_bg = optimize.leastsq(errfunc, p0[:], args=(x_bg, y_bg), full_output=True)
ss_err_bg=(infodict_bg['fvec']**2).sum()
ss_tot_bg=((y_bg-mean(y_bg))**2).sum()
Rsquared_BG=1-(ss_err_bg/ss_tot_bg)
x_range = linspace(hi, lo, 100)
x_range_solid = linspace(X[-1], X[0], 100)
graph1 = plot(Ycontrol[0], Ycontrol[1], "--", color="gray", label=None)
errorbar(CX, ALL_y, yerr=None, fmt="o", color="lightblue", label=None)
errorbar(x, y, yerr=Yerr, fmt="o", color="#003366", label=None)
graph_solid= plot(x_range_solid, fitfunc(p1, x_range_solid), "-", color="#003366", label=None)
graph_dotted= plot(x_range, fitfunc(p1, x_range), "--", color="#003366", label=None)
title("Receptor binding - Fit logIC50")
ylabel("inhibition [%]")
res = p1[:]
log_ic50 = res[0]
res[0] = 10**p1[0]
ki = (test.lb*res[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
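    # convert the fitted IC50 to Ki using the assay constants stored on the test record
    # (test.lb, test.lt, test.rt, test.kd); a competition-binding style correction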
# if out_of_scale:
# if res[0]>9999.0:
# msg = 'IC50: >9999.0\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res[1], out_of_scale)
# else:
# msg = 'IC50: {: 7.3f}\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res[0], res[1], out_of_scale)
# else:
# if res[0]>9999.0:
# msg = 'IC50: >9999.0\nHS: {: 7.3f}'.format(res[1])
# else:
# msg = 'IC50: {: 7.3f}\nHS: {: 7.3f}'.format(res[0], res[1])
if res[0]>9999.0:
msg = 'Ki: >999.0\nIC50: >9999.0\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res[1], Rsquared)
else:
msg = 'Ki: {: 7.3f}\nIC50: {: 7.3f}\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(ki, res[0], res[1], Rsquared)
text(lo+.05, 24,
msg,
fontsize=10,
verticalalignment='bottom',
color="#003366",
alpha=1.0,
backgroundcolor="white"
)
# graph2 = plot(x_bg, y_bg, "ro", color="#006600", label="IC50 -BG")
x_bg_range_solid = linspace(X_BG[-1], X_BG[0], 100)
errorbar(CX_BG, ALL_y_bg, yerr=None, fmt='o', color="lightgreen", label=None)
errorbar(x_bg, y_bg, yerr=Y_BGerr, fmt='o', color="#006600", label=None)
graph2_solid = plot(x_bg_range_solid, fitfunc(p1_bg, x_bg_range_solid), "-", color="#006600", label=None)
graph2_dotted = plot(x_range, fitfunc(p1_bg, x_range), "--", color="#006600", label=None)
    # Legend of the plot
xlabel(r"conc [log($\mu$M)]")
ylabel("inhibition [%]")
from matplotlib.lines import Line2D
line = Line2D(range(10), range(10), linestyle='-', marker='o', color="#003366")
line2 = Line2D(range(10), range(10), linestyle='-', marker='o', color="#006600")
line3 = Line2D(range(10), range(10), linestyle='-', marker='.', color="gray")
leg = legend((line, line2, line3, ), ('IC50', 'IC50 -BG', 'BG', ), loc=1)
ltext = leg.get_texts()
setp(ltext, fontsize='small')
res_bg = p1_bg[:]
log_ic50_bg = res_bg[0]
res_bg[0] = 10**p1_bg[0]
ki_bg = (test.lb*res_bg[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
# if out_of_scale_BG:
# if res_bg[0]>9999.0:
# msg_BG = 'IC50: >9999.0\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res_bg[1], out_of_scale_BG)
# else:
# msg_BG = 'IC50: {: 7.3f}\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res_bg[0], res_bg[1], out_of_scale_BG)
# else:
# if res_bg[0]>9999.0:
# msg_BG = 'IC50: >9999.0\nHS: {: 7.3f}'.format(res_bg[1])
# else:
# msg_BG = 'IC50: {: 7.3f}\nHS: {: 7.3f}'.format(res_bg[0], res_bg[1])
if res_bg[0]>9999.0:
msg_BG = 'Ki: >999.0\nIC50: >9999.0\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res_bg[1], Rsquared_BG)
else:
msg_BG = 'Ki: {: 7.3f}\nIC50: {: 7.3f}\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(ki_bg, res_bg[0], res_bg[1], Rsquared_BG)
text(lo+.05, 2,
msg_BG,
fontsize=10,
verticalalignment='bottom',
color="#006600",
alpha=1.0,
backgroundcolor="white"
)
grid()
ax = gca()
ax.set_ylim(-20,120) #
ax.set_xlim(lo,hi) #
axvline(x=0, color="#2e7682")
axhline(y=0, color="#2e7682")
for item in ([ax.xaxis.label, ax.yaxis.label] +ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(10)
if dpi:
savefig(filepath, dpi=dpi)
else:
savefig(filepath)
savefig(thumbpath, dpi=50)
close()
return((round(res[0], 4), round(res[1], 4), round(Rsquared, 4), round(ki, 4)), (round(res_bg[0], 4), round(res_bg[1], 4), round(Rsquared_BG, 4), round(ki_bg, 4)), fluorescence)
def save_data_fix_HS(CX, CX_BG, Y_ABCD, Y_ABCD_BG, Ycontrol, test, filepath, scope, scope_BG, f_list, raw, dpi=None):
from pylab import plot, title, xlabel, ylabel, legend, axes, text, savefig, grid, clf, gca, axvline, axhline, setp, errorbar, close
from scipy import optimize, linspace
from math import fabs
from numpy import mean
import os
head, tail = os.path.split(filepath)
thumbpath = os.path.join(head, "thumb_"+tail)
props = dict(boxstyle='square', facecolor='white')
X = []
X_BG = []
ABCD = []
ABCD_BG = []
for el in scope:
X.append(CX[int(el)-1])
for el in scope_BG:
X_BG.append(CX_BG[int(el)-1])
for row in Y_ABCD:
list =[]
for el in scope:
list.append(row[int(el)-1])
ABCD.append(list)
for row in Y_ABCD_BG:
list =[]
for el in scope_BG:
list.append(row[int(el)-1])
ABCD_BG.append(list)
x = X
x_bg = X_BG
nums = array(ABCD).T
nums_BG = array(ABCD_BG).T
ALL_nums = array(Y_ABCD).T
ALL_nums_BG = array(Y_ABCD_BG).T
# STDEV
Yerr = nums.std(axis=1)
Y_BGerr = nums_BG.std(axis=1)
# Means from array
y = nums.mean(axis=1).tolist()
y_bg = nums_BG.mean(axis=1).tolist()
ALL_y = ALL_nums.mean(axis=1).tolist()
ALL_y_bg = ALL_nums_BG.mean(axis=1).tolist()
hi = int(round(max(x)+2, 0))
lo = int(round(min(x), 0))-1
p0_gess = CX[int((len(CX)/2)-1)]
p0 = [p0_gess]
Top = 100
Bottom = 0
out_of_scale = []
out_of_scale_BG = []
sections = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y:
if el > 120.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(6, []).append(y.index(el))
elif el > 90.0:
sections.setdefault(6, []).append(y.index(el))
elif el <= 90.0 and el > 75.0:
sections.setdefault(5, []).append(y.index(el))
elif el <= 75.0 and el > 50.0:
sections.setdefault(4, []).append(y.index(el))
elif el <= 50.0 and el > 25.0:
sections.setdefault(3, []).append(y.index(el))
elif el <= 25.0 and el > 10.0:
sections.setdefault(2, []).append(y.index(el))
elif el <= 10.0 and el >= -20.0:
sections.setdefault(1, []).append(y.index(el))
elif el < -20.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(1, []).append(y.index(el))
sections_BG = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y_bg:
if el > 120.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el > 90.0:
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el <= 90.0 and el > 75.0:
sections_BG.setdefault(5, []).append(y_bg.index(el))
elif el <= 75.0 and el > 50.0:
sections_BG.setdefault(4, []).append(y_bg.index(el))
elif el <= 50.0 and el > 25.0:
sections_BG.setdefault(3, []).append(y_bg.index(el))
elif el <= 25.0 and el > 10.0:
sections_BG.setdefault(2, []).append(y_bg.index(el))
elif el <= 10.0 and el >= -20.0:
sections_BG.setdefault(1, []).append(y_bg.index(el))
elif el < -20.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(1, []).append(y_bg.index(el))
for el in y_bg:
if el > 120.0 or el < -20.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
keys = []
fluorescence = 0
if raw:
selected_section = sections
else:
selected_section = sections_BG
for k, v in selected_section.iteritems():
if len(v)>0:
keys.append(k)
k_min = min(keys)
k_max = max(keys)
fluorescence += k_max-k_min
for k, v in selected_section.iteritems():
if len(v)>0:
test_me = True
for vx in v:
if f_list[vx] == 0:
test_me = False
if test_me:
fluorescence+=1
else:
if k<k_max and k>k_min:
fluorescence+=1
fitfunc = lambda p,X : Bottom + (Top-Bottom)/ (1+10**((p[0]-X)*(-1.)))
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
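    # same logistic model as in save_fix_data but with the Hill slope fixed at -1,
    # so only p[0] (the logIC50) is fitted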
p1,cov,infodict,mesg,ier = optimize.leastsq(errfunc, p0[:], args=(x, y), full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((y-mean(y))**2).sum()
Rsquared=1-(ss_err/ss_tot)
p1_bg ,cov_bg,infodict_bg,mesg_bg,ier_bg = optimize.leastsq(errfunc, p0[:], args=(x_bg, y_bg), full_output=True)
ss_err_bg=(infodict_bg['fvec']**2).sum()
ss_tot_bg=((y_bg-mean(y_bg))**2).sum()
Rsquared_BG=1-(ss_err_bg/ss_tot_bg)
x_range = linspace(hi, lo, 100)
x_range_solid = linspace(X[-1], X[0], 100)
graph1 = plot(Ycontrol[0], Ycontrol[1], "--", color="gray", label=None)
errorbar(CX, ALL_y, yerr=None, fmt="o", color="lightblue", label=None)
errorbar(x, y, yerr=Yerr, fmt="o", color="#003366", label=None)
graph_solid= plot(x_range_solid, fitfunc(p1, x_range_solid), "-", color="#003366", label=None)
graph_dotted= plot(x_range, fitfunc(p1, x_range), "--", color="#003366", label=None)
title("Receptor binding - Fit logIC50")
ylabel("inhibition [%]")
res = p1[:]
log_ic50 = res[0]
res[0] = 10**p1[0]
ki = (test.lb*res[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
# if out_of_scale:
# if res[0]>9999.0:
# msg = 'IC50: >9999.0\noff-scale(x,y): {:}'.format(Rsq)
# else:
# msg = 'IC50: {: 7.3f}\noff-scale(x,y): {:}'.format(res[0], out_of_scale)
# else:
# if res[0]>9999.0:
# msg = 'Ki: >999.0\nIC50: >9999.0\nHS: -1.0\nR2: {: 7.3f}'.format(Rsquared)
# else:
# msg = 'Ki: {: 7.3f}\nIC50: {: 7.3f}\nHS: -1.0\nR2: {: 7.3f}'.format(ki, res[0], Rsquared)
if res[0]>9999.0:
msg = 'Ki: >999.0\nIC50: >9999.0\nHS: -1.0\nR2: {: 7.3f}'.format(Rsquared)
else:
msg = 'Ki: {: 7.3f}\nIC50: {: 7.3f}\nHS: -1.0\nR2: {: 7.3f}'.format(ki, res[0], Rsquared)
text(lo+.05, 24,
msg,
fontsize=10,
verticalalignment='bottom',
color="#003366",
alpha=1.0,
backgroundcolor="white"
)
# graph2 = plot(x_bg, y_bg, "ro", color="#006600", label="IC50 -BG")
x_bg_range_solid = linspace(X_BG[-1], X_BG[0], 100)
errorbar(CX_BG, ALL_y_bg, yerr=None, fmt='o', color="lightgreen", label=None)
errorbar(x_bg, y_bg, yerr=Y_BGerr, fmt='o', color="#006600", label=None)
graph2_solid = plot(x_bg_range_solid, fitfunc(p1_bg, x_bg_range_solid), "-", color="#006600", label=None)
graph2_dotted = plot(x_range, fitfunc(p1_bg, x_range), "--", color="#006600", label=None)
    # Legend of the plot
xlabel(r"conc [log($\mu$M)]")
ylabel("inhibition [%]")
from matplotlib.lines import Line2D
line = Line2D(range(10), range(10), linestyle='-', marker='o', color="#003366")
line2 = Line2D(range(10), range(10), linestyle='-', marker='o', color="#006600")
line3 = Line2D(range(10), range(10), linestyle='-', marker='.', color="gray")
leg = legend((line, line2, line3, ), ('IC50', 'IC50 -BG', 'BG', ), loc=1)
ltext = leg.get_texts()
setp(ltext, fontsize='small')
res_bg = p1_bg[:]
log_ic50_bg = res_bg[0]
res_bg[0] = 10**p1_bg[0]
ki_bg = (test.lb*res_bg[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
# if out_of_scale_BG:
# if res_bg[0]>9999.0:
# msg_BG = 'IC50: >9999.0\nR2: {: 7.3f}'.format(Rsquared_BG)
# else:
# msg_BG = 'IC50: {: 7.3f}\nR2: {: 7.3f}'.format(res_bg[0], Rsquared_BG)
# else:
# if res_bg[0]>9999.0:
# msg_BG = 'Ki: >999.0\nIC50: >9999.0\nHS: \nR2: {: 10.3f}'.format(Rsquared_BG)
# else:
# msg_BG = 'Ki: {: 10.3f}\nIC50: {: 10.3f}\nHS: -1.0\nR2: {: 10.3f}'.format(ki_bg, res_bg[0], Rsquared_BG)
#
if res_bg[0]>9999.0:
msg_BG = 'Ki: >999.0\nIC50: >9999.0\nHS: -1.0\nR2: {: 7.3f}'.format(Rsquared_BG)
else:
msg_BG = 'Ki: {: 7.3f}\nIC50: {: 7.3f}\nHS: -1.0\nR2: {: 7.3f}'.format(ki_bg, res_bg[0], Rsquared_BG)
text(lo+.05, 2,
msg_BG,
fontsize=10,
verticalalignment='bottom',
color="#006600",
alpha=1.0,
backgroundcolor="white"
)
grid()
ax = gca()
ax.set_ylim(-20,120) #
ax.set_xlim(lo,hi) #
axvline(x=0, color="#2e7682")
axhline(y=0, color="#2e7682")
for item in ([ax.xaxis.label, ax.yaxis.label] +ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(10)
if dpi:
savefig(filepath, dpi=dpi)
else:
savefig(filepath)
savefig(thumbpath, dpi=50)
close()
return((round(res[0], 4), round(Rsquared, 4), round(ki, 4)), (round(res_bg[0], 4), round(Rsquared_BG, 4), round(ki_bg, 4)), fluorescence)
#def save_data(X, Y, filepath):
# from pylab import plot, title, xlabel, ylabel, legend, axes, text, show, savefig
# from scipy import optimize, linspace
# x = X
# y = Y
#
# p0 = [100.0, 0., 0.1,-1.]
#
# fitfunc = lambda p,X : p[1] + (p[0]-p[1])/ (1+10**((p[2]-X)*p[3]))
# errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
#
# p1, success = optimize.leastsq(errfunc, p0[:], args=(x, y))
#
# print p1, success
#
# x_range = linspace(-2, 4,100)
#
# plot(x, y, "ro", x_range, fitfunc(p1, x_range), "r-",
# #x_range, fitfunc(p0, x_range), "b-"
# ) # Plot of the data and the fit
#
# # Legend the plot
# title("IC50")
# xlabel("conc [log(uM)]")
# ylabel("inhibition [%]")
# legend(('points', 'opt',
# #'guess'
# ))
#
# ax = axes()
# res = p1[:]
# res[2] = 10**p1[2]
# text(-0.5, 0,
# 'Top: %.3f \nBottom: %.3f\nIC50: %.3f \nHill Slope: %.3f'% tuple(res),
# fontsize=12,
# verticalalignment='bottom',
# #transform=ax.transAxes
# )
# savefig(filepath)
# return(tuple(res))
def save_fix_data2(CX, CX_BG, Y_ABCD, Y_ABCD_BG, Ycontrol, filepath, scope, scope_BG, f_list, raw, dpi=None):
from pylab import plot, title, xlabel, ylabel, legend, axes, text, savefig, grid, clf, gca, axvline, axhline, setp, errorbar
from scipy import optimize, linspace
from math import fabs
from numpy import mean
props = dict(boxstyle='square', facecolor='white')
X = []
X_BG = []
ABCD = []
ABCD_BG = []
for el in scope:
X.append(CX[int(el)-1])
for el in scope_BG:
X_BG.append(CX_BG[int(el)-1])
for row in Y_ABCD:
list =[]
for el in scope:
list.append(row[int(el)-1])
ABCD.append(list)
for row in Y_ABCD_BG:
list =[]
for el in scope_BG:
list.append(row[int(el)-1])
ABCD_BG.append(list)
x = X
x_bg = X_BG
nums = array(ABCD).T
nums_BG = array(ABCD_BG).T
ALL_nums = array(Y_ABCD).T
ALL_nums_BG = array(Y_ABCD_BG).T
# STDEV
Yerr = nums.std(axis=1)
Y_BGerr = nums_BG.std(axis=1)
# Means from array
y = nums.mean(axis=1).tolist()
print 'y', y
y_bg = nums_BG.mean(axis=1).tolist()
print 'y_bg', y_bg
ALL_y = ALL_nums.mean(axis=1).tolist()
ALL_y_bg = ALL_nums_BG.mean(axis=1).tolist()
hi = int(round(max(x)+2, 0))
lo = int(round(min(x), 0))-1
print "hi: %s ; lo: %s" % (hi, lo)
p0_gess = CX[int((len(CX)/2)-1)]
p0 = [p0_gess,-1.]
Top = 100
Bottom = 0
out_of_scale = []
out_of_scale_BG = []
sections = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y:
if el > 120.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(6, []).append(y.index(el))
elif el > 90.0:
sections.setdefault(6, []).append(y.index(el))
elif el <= 90.0 and el > 75.0:
sections.setdefault(5, []).append(y.index(el))
elif el <= 75.0 and el > 50.0:
sections.setdefault(4, []).append(y.index(el))
elif el <= 50.0 and el > 25.0:
sections.setdefault(3, []).append(y.index(el))
elif el <= 25.0 and el > 10.0:
sections.setdefault(2, []).append(y.index(el))
elif el <= 10.0 and el >= -20.0:
sections.setdefault(1, []).append(y.index(el))
elif el < -20.0:
out_of_scale.append((X[y.index(el)], round(el, 4)))
sections.setdefault(1, []).append(y.index(el))
sections_BG = {} # dict of lists of y.index(el) - for ranges(1,7): >90 | 90-75 | 75-50 | 50-25 | 25-10 | <10
for el in y_bg:
if el > 120.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el > 90.0:
sections_BG.setdefault(6, []).append(y_bg.index(el))
elif el <= 90.0 and el > 75.0:
sections_BG.setdefault(5, []).append(y_bg.index(el))
elif el <= 75.0 and el > 50.0:
sections_BG.setdefault(4, []).append(y_bg.index(el))
elif el <= 50.0 and el > 25.0:
sections_BG.setdefault(3, []).append(y_bg.index(el))
elif el <= 25.0 and el > 10.0:
sections_BG.setdefault(2, []).append(y_bg.index(el))
elif el <= 10.0 and el >= -20.0:
sections_BG.setdefault(1, []).append(y_bg.index(el))
elif el < -20.0:
out_of_scale_BG.append((X_BG[y_bg.index(el)], round(el, 4)))
sections_BG.setdefault(1, []).append(y_bg.index(el))
keys = []
fluorescence = 0
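    # Fluorescence score heuristic: count the span between the highest and
    # lowest populated response sections, then add one for every populated
    # section whose points all carry a nonzero f_list flag (or, failing that,
    # that lies strictly between the two extreme sections).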
if raw:
selected_section = sections
else:
selected_section = sections_BG
for k, v in selected_section.iteritems():
if len(v)>0:
keys.append(k)
k_min = min(keys)
k_max = max(keys)
fluorescence += k_max-k_min
for k, v in selected_section.iteritems():
if len(v)>0:
test_me = True
for vx in v:
if f_list[vx] == 0:
test_me = False
if test_me:
fluorescence+=1
else:
if k<k_max and k>k_min:
fluorescence+=1
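    # Sigmoidal dose-response (four-parameter logistic) model with Top and
    # Bottom fixed at 100 % and 0 % inhibition; p[0] is log10(IC50) and p[1]
    # is the Hill slope.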
fitfunc = lambda p,X : Bottom + (Top-Bottom)/ (1+10**((p[0]-X)*p[1]))
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
p1,cov,infodict,mesg,ier = optimize.leastsq(errfunc, p0[:], args=(x, y), full_output=True)
print "p1", p1
print "cov", cov
print "infodict", infodict
print "mesg", mesg
print "ier", ier
ss_err=(infodict['fvec']**2).sum()
ss_tot=((y-mean(y))**2).sum()
Rsquared=1-(ss_err/ss_tot)
print "x_bg", x_bg
print "y_bg", y_bg
p1_bg ,cov_bg,infodict_bg,mesg_bg,ier_bg = optimize.leastsq(errfunc, p0[:], args=(x_bg, y_bg), full_output=True)
print "p1_bg", p1_bg
print "cov_bg", cov_bg
print "infodict_bg", infodict_bg
print "ier_bg", ier_bg
ss_err_bg=(infodict_bg['fvec']**2).sum()
ss_tot_bg=((y_bg-mean(y_bg))**2).sum()
Rsquared_BG=1-(ss_err_bg/ss_tot_bg)
x_range = linspace(hi, lo, 100)
x_range_solid = linspace(X[-1], X[0], 100)
graph1 = plot(Ycontrol[0], Ycontrol[1], "--", color="gray", label=None)
errorbar(CX, ALL_y, yerr=None, fmt="o", color="lightblue", label=None)
errorbar(x, y, yerr=Yerr, fmt="o", color="#003366", label=None)
graph_solid= plot(x_range_solid, fitfunc(p1, x_range_solid), "-", color="#003366", label=None)
graph_dotted= plot(x_range, fitfunc(p1, x_range), "--", color="#003366", label=None)
title("Receptor binding - Fit logIC50")
ylabel("inhibition [%]")
res = p1[:]
log_ic50 = res[0]
res[0] = 10**p1[0]
# ki = (test.lb*res[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
# if out_of_scale:
# if res[0]>9999.0:
# msg = 'IC50: >9999.0\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res[1], out_of_scale)
# else:
# msg = 'IC50: {: 7.3f}\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res[0], res[1], out_of_scale)
# else:
# if res[0]>9999.0:
# msg = 'IC50: >9999.0\nHS: {: 7.3f}'.format(res[1])
# else:
# msg = 'IC50: {: 7.3f}\nHS: {: 7.3f}'.format(res[0], res[1])
if res[0]>9999.0:
msg = 'Ki: >999.0\nIC50: >9999.0\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res[1], Rsquared)
else:
msg = 'IC50: {: 7.3f}\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res[0], res[1], Rsquared)
text(lo+.05, 24,
msg,
fontsize=10,
verticalalignment='bottom',
color="#003366",
alpha=1.0,
backgroundcolor="white"
)
# graph2 = plot(x_bg, y_bg, "ro", color="#006600", label="IC50 -BG")
x_bg_range_solid = linspace(X_BG[-1], X_BG[0], 100)
errorbar(CX_BG, ALL_y_bg, yerr=None, fmt='o', color="lightgreen", label=None)
errorbar(x_bg, y_bg, yerr=Y_BGerr, fmt='o', color="#006600", label=None)
graph2_solid = plot(x_bg_range_solid, fitfunc(p1_bg, x_bg_range_solid), "-", color="#006600", label=None)
graph2_dotted = plot(x_range, fitfunc(p1_bg, x_range), "--", color="#006600", label=None)
# Legend of the plot
xlabel(r"conc [log($\mu$M)]")
ylabel("inhibition [%]")
from matplotlib.lines import Line2D
line = Line2D(range(10), range(10), linestyle='-', marker='o', color="#003366")
line2 = Line2D(range(10), range(10), linestyle='-', marker='o', color="#006600")
line3 = Line2D(range(10), range(10), linestyle='-', marker='.', color="gray")
leg = legend((line, line2, line3, ), ('IC50', 'IC50 -BG', 'BG', ), loc=1)
ltext = leg.get_texts()
setp(ltext, fontsize='small')
res_bg = p1_bg[:]
log_ic50_bg = res_bg[0]
res_bg[0] = 10**p1_bg[0]
# ki_bg = (test.lb*res_bg[0]*test.kd)/((test.lt*test.rt)+test.lb*(test.rt-test.lt+test.lb-test.kd))
# if out_of_scale_BG:
# if res_bg[0]>9999.0:
# msg_BG = 'IC50: >9999.0\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res_bg[1], out_of_scale_BG)
# else:
# msg_BG = 'IC50: {: 7.3f}\nHS: {: 7.3f}\noff-scale(x,y): {:}'.format(res_bg[0], res_bg[1], out_of_scale_BG)
# else:
# if res_bg[0]>9999.0:
# msg_BG = 'IC50: >9999.0\nHS: {: 7.3f}'.format(res_bg[1])
# else:
# msg_BG = 'IC50: {: 7.3f}\nHS: {: 7.3f}'.format(res_bg[0], res_bg[1])
if res_bg[0]>9999.0:
msg_BG = 'Ki: >999.0\nIC50: >9999.0\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res_bg[1], Rsquared_BG)
else:
msg_BG = 'IC50: {: 7.3f}\nHS: {: 7.3f}\nR2: {: 7.3f}'.format(res_bg[0], res_bg[1], Rsquared_BG)
text(lo+.5, 2,
msg_BG,
fontsize=10,
verticalalignment='bottom',
color="#006600",
alpha=1.0,
backgroundcolor="white"
)
grid()
ax = gca()
ax.set_ylim(-20,220) #
ax.set_xlim(lo,hi) #
axvline(x=0, color="#2e7682")
axhline(y=0, color="#2e7682")
for item in ([ax.xaxis.label, ax.yaxis.label] +ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(10)
if dpi:
savefig(filepath, dpi=dpi)
else:
savefig(filepath)
clf()
return((round(res[0], 4), round(res[1], 4), round(Rsquared, 4)), (round(res_bg[0], 4), round(res_bg[1], 4), round(Rsquared_BG, 4)), fluorescence)
| bsd-3-clause |
gwtsa/gwtsa | pastas/project/plots.py | 1 | 1701 | """This module contains plotting methods for Pastas projects.
Raoul Collenteur, 2018 - Artesia Water.
"""
import matplotlib.pyplot as plt
class Plot:
def __init__(self, mls):
"""
Parameters
----------
mls: pastas.Project
Pastas project
"""
self.mls = mls
def stresses(self, kind=None, cols=2, **kwargs):
"""Make plots of the stresses in different subplots.
Parameters
----------
kind: str
string with one of the types of the stresses.
cols: int
Number of columns to divide the plots over.
Returns
-------
ax: matplotlib.axes
returns a list of matplotlib axes instances.
"""
if isinstance(kind, str):
kinds = [kind]
else:
kinds = list(kind)
stresses = self.mls.stresses.index[self.mls.stresses.kind.isin(kinds)]
num = len(stresses)
        rows = -(-num // cols)  # round up without an additional import
# Automatically adjust figsize
if "figsize" not in kwargs.keys():
norm_size = plt.rcParams["figure.figsize"]
kwargs["figsize"] = [cols / 2 * norm_size[0],
rows / 5 * norm_size[1]]
_, ax = plt.subplots(rows, cols, **kwargs)
if hasattr(ax, "flatten"):
ax = ax.flatten()
else:
ax = [ax]
for i, key in enumerate(stresses):
self.mls.stresses.loc[key, "series"].series.plot(ax=ax[i],
x_compat=True)
ax[i].legend([key], loc=2)
return ax
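    # Hypothetical usage sketch (not part of this module): assuming a pastas
    # Project ``mls`` that exposes this class as ``mls.plots`` and already
    # contains stresses of kind "prec" and "evap",
    #
    #     axes = mls.plots.stresses(kind=["prec", "evap"], cols=2)
    #     axes[0].figure.savefig("stresses.png")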
| mit |
schets/scikit-learn | sklearn/semi_supervised/label_propagation.py | 24 | 15181 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
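        # Iteratively propagate the distributions through the graph, rescale
        # them by the clamping weights (full weight for labeled points,
        # ``alpha`` for unlabeled ones) and re-add the static portion of the
        # original labels, until convergence or ``max_iter`` is reached.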
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
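        # Negating the normalized Laplacian and zeroing its diagonal below
        # leaves the symmetrically normalized affinities W_ij / sqrt(d_i d_j)
        # off the diagonal, i.e. the spreading matrix of Zhou et al. (2004).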
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
attacc/lumen | PythonPP/current.py | 1 | 1621 | #!/usr/bin/python3
import argparse
import numpy as np
import re
import sys
from scipy.interpolate import InterpolatedUnivariateSpline
import matplotlib.pyplot as plt
"""
Calculate current using finite differences from the polarization
Author: C. Attaccalite
"""
#
# parse command line
#
parser = argparse.ArgumentParser(prog='lumen_PP',description='Current from the polarization',epilog="Copyright C. Attaccalite")
parser.add_argument('-f', help="polarization file",type=str , default=None, dest="polname")
args = parser.parse_args()
print("\n * * * Calculate Current from the Polarization * * * \n\n")
if args.polname is None:
print('type "current.py --help" for help ',)
exit(0)
fs2aut=41.341373336561361 # conversion from femtoseconds to atomic units of time
data=np.genfromtxt(args.polname,comments="#")
data_der=np.empty_like(data)
delta =(data[1,0]-data[0,0])*fs2aut
npoints=len(data[:,0])
ndata =len(data[0,:])-1
print("Number of points: %d " % npoints)
print("Number of column: %d " % ndata)
print("DeltaT : %f " % delta)
for ip in range(1,npoints-1):
for idt in range(1,ndata):
data_der[ip,idt]=(data[ip+1,idt]-data[ip-1,idt])/(2.0*delta)
for idt in range(1,ndata):
data_der[0,idt] =(data[1,idt]-data[0,idt])/(delta)
data_der[npoints-1,idt]=(data[npoints-1,idt]-data[npoints-2,idt])/(delta)
data_der[:,0]=data[:,0]
#for idt in range(1,ndata):
# interp= InterpolatedUnivariateSpline(data[:,0], data[:,idt], k=3)
# derf = interp.derivative()
# data_der[idt]=derf(data[0])
np.savetxt('der_polarization.dat',data_der,fmt='%2.15e')
| gpl-2.0 |
mehdidc/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 40346 | from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone, ClassifierMixin
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning, NotFittedError
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if estimator.__class__.__name__ == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
assert_raise_message(TypeError, "string or a number", estimator.fit, X, y)
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises(NotFittedError, transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_unfitted(name, Estimator):
"""Check if NotFittedError is raised when calling predict and related
functions"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'predict'):
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'decision_function'):
assert_raises(NotFittedError, est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raises(NotFittedError, est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raises(NotFittedError, est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
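    # The manual weights are the inverse class frequencies (1/3 for the three
    # samples of class 1, 1/2 for the two samples of class -1), normalized by
    # their mean so that they average to one.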
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
| bsd-3-clause |
themrmax/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation from non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
and RANSAC because it does not attempt to completely filter the outliers
but lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
inkenbrandt/EPAEN | Line_Orientation/line_orientation.py | 2 | 9653 | # -*- coding: utf-8 -*-
__author__ = "Paul C. Inkenbrandt <[email protected]>"
__version__ = "0.0"
# Import required modules
import arcpy
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
arcpy.AddMessage("All Modules Imported Successfully!")
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
arcpy.AddMessage("Spatial Extension is Available")
# Overwrite pre-existing files
arcpy.env.overwriteOutput = True
# User Inputs #################################################################
# Elevation data
Elevation_Raster = arcpy.GetParameterAsText(0)
if Elevation_Raster == '#' or not Elevation_Raster:
Elevation_Raster = "C:\\PROJECTS\\Pdwr_Mtn\\Pdwr_Mtn.gdb\\NED10" # provide a default value if unspecified
# Get name of Raster File and assign to variable fieldname
def getFileNameWithoutExtension(path):
return path.split('\\').pop().split('/').pop().rsplit('.', 1)[0]
fieldname = getFileNameWithoutExtension(Elevation_Raster)
# Lines with orientations
Line_features = arcpy.GetParameterAsText(1)
if Line_features == '#' or not Line_features:
Line_features = "C:\\PROJECTS\\Pdwr_Mtn\\Pdwr_Mtn.gdb\\Stream_Line.shp" # provide a default value if unspecified
# Location and name of Feature Class Where resulting segments will be stored
Output_Line_features = arcpy.GetParameterAsText(2)
# Place to put rose diagram
pdf = PdfPages(arcpy.GetParameterAsText(8)+".pdf")
###############################################################################
# Straighten Lines
arcpy.SimplifyLine_cartography(Line_features, Output_Line_features, "POINT_REMOVE", float(arcpy.GetParameterAsText(3)),"FLAG_ERRORS", "NO_KEEP","NO_CHECK")
arcpy.AddMessage("Simplifying Lines complete")
# Create temporary input file for Split Line tool
tempFC = arcpy.env.scratchGDB + os.path.sep + "tempFC"
arcpy.CopyFeatures_management(Output_Line_features, tempFC)
# Overwrite Output lines with line segments
arcpy.SplitLine_management(tempFC, Output_Line_features)
arcpy.Delete_management(tempFC)
arcpy.AddMessage("Splitting Lines Complete")
# Make a temporary feature class to hold line segment vertices
tempVert = arcpy.env.scratchGDB + os.path.sep + "tempVert"
# Process: Feature Vertices To Points
arcpy.FeatureVerticesToPoints_management(Output_Line_features, tempVert, "BOTH_ENDS")
# Process: Add XY Coordinates to points
arcpy.AddXY_management(tempVert)
# Process: Add Elevation Info to points
arcpy.sa.ExtractMultiValuesToPoints(tempVert, Elevation_Raster, "BILINEAR")
arcpy.AddMessage("Elevation Data Extracted")
# Add RASTERVALU field and populate with output from Extract Values to points
arcpy.AddField_management(tempVert,"RASTERVALU","FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(tempVert, "RASTERVALU", "!"+fieldname+"!", "PYTHON_9.3")
# Process: Summary Statistics
pivotStats = arcpy.env.scratchGDB + os.path.sep + "stats"
arcpy.Statistics_analysis(tempVert, pivotStats, "POINT_X FIRST;POINT_X LAST;POINT_Y FIRST;POINT_Y LAST;RASTERVALU FIRST;RASTERVALU LAST", "ORIG_FID")
arcpy.AddMessage("Calculated Statistics")
# Process: Add and calculate Trend Field
arcpy.AddField_management(pivotStats, "trend", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(pivotStats, "trend", "180 + math.atan2((!LAST_POINT_Y! - !FIRST_POINT_Y!),(!LAST_POINT_X! - !FIRST_POINT_X!)) * (180 / math.pi)", "PYTHON_9.3", "")
arcpy.AddMessage("Trend Calculated")
# Process: Add and calculate Distance Field
arcpy.AddField_management(pivotStats, "distance", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(pivotStats, "distance", "math.sqrt((!LAST_POINT_Y! - !FIRST_POINT_Y!)**2 + (!LAST_POINT_X! - !FIRST_POINT_X!)**2)", "PYTHON_9.3", "")
arcpy.AddMessage("Distance Calculated")
# Process: Add and calculate Plunge Field
arcpy.AddField_management(pivotStats, "plunge", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(pivotStats, "plunge", "math.atan2((!LAST_RASTERVALU! - !FIRST_RASTERVALU!), !distance!)*180/math.pi", "PYTHON_9.3", "")
arcpy.AddMessage("Plunge Calculated")
# Process: Join Table to Shapefile
arcpy.JoinField_management(Output_Line_features, "OBJECTID", pivotStats, "ORIG_FID", "")
arcpy.AddMessage("Fields Joined")
# Clean up intermediate files
arcpy.Delete_management(tempVert)
arcpy.Delete_management(arcpy.env.scratchGDB + os.path.sep + "pivot")
arcpy.Delete_management(pivotStats)
###############################################################################
## PLOTS!!! ##
# Rose Plot
makeroseplot = arcpy.GetParameterAsText(4) # Boolean for activating rose plot
if str(makeroseplot) == 'true':
import itertools
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
# Grab values from the output table to make plots
arr = arcpy.da.TableToNumPyArray(Output_Line_features,('trend','plunge'),null_value=0)
arcpy.AddMessage("Trend exported for plotting")
## ROSE DIAGRAM
# http://stackoverflow.com/questions/16264837/how-would-one-add-a-colorbar-to-this-example
# Plot concept by Joe Kington
some_array_of_azimuth_directions = arr['trend']
azi = arr['trend']
z = [abs(arr['plunge'][i]) for i in range(len(arr['plunge']))]
def rose(azimuths, z=None, ax=None, bins=30, bidirectional=False,
color_by=np.mean, **kwargs):
"""Create a "rose" diagram (a.k.a. circular histogram).
Parameters:
-----------
azimuths: sequence of numbers
The observed azimuths in degrees.
z: sequence of numbers (optional)
A second, co-located variable to color the plotted rectangles by.
ax: a matplotlib Axes (optional)
The axes to plot on. Defaults to the current axes.
bins: int or sequence of numbers (optional)
The number of bins or a sequence of bin edges to use.
bidirectional: boolean (optional)
Whether or not to treat the observed azimuths as bi-directional
measurements (i.e. if True, 0 and 180 are identical).
color_by: function or string (optional)
A function to reduce the binned z values with. Alternately, if the
string "count" is passed in, the displayed bars will be colored by
their y-value (the number of azimuths measurements in that bin).
Additional keyword arguments are passed on to PatchCollection.
Returns:
--------
A matplotlib PatchCollection
"""
azimuths = np.asanyarray(azimuths)
if color_by == 'count':
z = np.ones_like(azimuths)
color_by = np.sum
if ax is None:
ax = plt.gca()
ax.set_theta_direction(-1)
ax.set_theta_offset(np.radians(90))
if bidirectional:
other = azimuths + 180
azimuths = np.concatenate([azimuths, other])
if z is not None:
z = np.concatenate([z, z])
# Convert to 0-360, in case negative or >360 azimuths are passed in.
azimuths[azimuths > 360] -= 360
azimuths[azimuths < 0] += 360
counts, edges = np.histogram(azimuths, range=[0, 360], bins=bins)
if z is not None:
idx = np.digitize(azimuths, edges)
z = np.array([color_by(z[idx == i]) for i in range(1, idx.max() + 1)])
z = np.ma.masked_invalid(z)
edges = np.radians(edges)
coll = colored_bar(edges[:-1], counts, z=z, width=np.diff(edges),
ax=ax, **kwargs)
return coll
def colored_bar(left, height, z=None, width=0.8, bottom=0, ax=None, **kwargs):
"""A bar plot colored by a scalar sequence."""
if ax is None:
ax = plt.gca()
width = itertools.cycle(np.atleast_1d(width))
bottom = itertools.cycle(np.atleast_1d(bottom))
rects = []
for x, y, h, w in zip(left, bottom, height, width):
rects.append(Rectangle((x,y), w, h))
coll = PatchCollection(rects, array=z, **kwargs)
ax.add_collection(coll)
ax.autoscale()
return coll
plt.figure(figsize=(5,6))
plt.subplot(111, projection='polar')
coll = rose(azi, z=z, bidirectional=True)
plt.xticks(np.radians(range(0, 360, 45)),
['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW'])
plt.colorbar(coll, orientation='horizontal')
plt.rgrids()
plt.xlabel('Dip (Degrees)')
plt.title(arcpy.GetParameterAsText(5))
pdf.savefig()
# Stereo Plot
makeStereoplot = arcpy.GetParameterAsText(6) # Boolean for activating stereo plot
if str(makeStereoplot) == 'true':
# from https://github.com/joferkington/mplstereonet
import mplstereonet
plt.figure()
fig, ax = mplstereonet.subplots()
# data from output file; add 90 to adjust for strike
strikes = [arr['trend'][i]+90 for i in range(len(arr['trend']))]
dips = [abs(arr['plunge'][i]) for i in range(len(arr['plunge']))]
cax = ax.density_contourf(strikes, dips, measurement='poles')
ax.pole(strikes, dips)
ax.grid(True)
fig.colorbar(cax)
plt.title(arcpy.GetParameterAsText(7))
pdf.savefig()
# Save multipage pdf
if str(makeStereoplot) == 'true' or str(makeroseplot) == 'true':
pdf.close()
arcpy.AddMessage("Plot saved at " + arcpy.GetParameterAsText(8)+ ".pdf") | gpl-2.0 |
geodynamics/burnman | contrib/CHRU2014/paper_incorrect_averaging.py | 2 | 9759 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
paper_incorrect_averaging
-------------------------
This script reproduces :cite:`Cottaar2014`, Figure 5.
Attempt to reproduce Figure 6.12 from :cite:`Murakami2013`
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from misc.helper_solid_solution import HelperSolidSolution
import misc.colors as colors
if __name__ == "__main__":
plt.figure(dpi=100, figsize=(12, 6))
prop = {'size': 12}
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
dashstyle2 = (7, 3)
dashstyle3 = (3, 2)
method = 'slb2'
# define the minerals from table 6.3
mg_perovskite = burnman.Mineral()
mg_perovskite.params = {'name': 'Mg perovskite',
'molar_mass': 0.1004,
'V_0': 24.43e-6,
'K_0': 253.0e9,
'Kprime_0': 3.9,
'G_0': 172.9e9,
'Gprime_0': 1.56,
'n': 5.0,
'Debye_0': 1100.,
'grueneisen_0': 1.40,
'q_0': 1.40,
'eta_s_0': 2.6}
mg_perovskite.set_method('slb2')
fe_perovskite = burnman.Mineral()
fe_perovskite.params = {'name': 'Fe perovskite',
'molar_mass': 0.1319,
'V_0': 25.49e-6,
'K_0': 281.0e9,
'Kprime_0': 4.1,
'G_0': 138.0e9,
'Gprime_0': 1.70,
'n': 5.0,
'Debye_0': 841.,
'grueneisen_0': 1.48,
'q_0': 1.40,
'eta_s_0': 2.1}
fe_perovskite.set_method(method)
periclase = burnman.Mineral()
periclase.params = {'name': 'periclase',
'molar_mass': 0.0403,
'V_0': 11.24e-6,
'K_0': 161.0e9,
'Kprime_0': 3.9,
'G_0': 130.9e9,
'Gprime_0': 1.92,
'n': 2.0,
'Debye_0': 773.,
'grueneisen_0': 1.50,
'q_0': 1.50,
'eta_s_0': 2.3}
periclase.set_method(method)
wustite = burnman.Mineral()
wustite.params = {'name': 'wustite',
'molar_mass': 0.07184,
'V_0': 12.06e-6,
'K_0': 152.0e9,
'Kprime_0': 4.9,
'G_0': 47.0e9,
'Gprime_0': 0.70,
'n': 2.0,
'Debye_0': 455.,
'grueneisen_0': 1.28,
'q_0': 1.50,
'eta_s_0': 0.8}
wustite.set_method(method)
# in the text for the book chapter a linear relationship in
# elastic properties for the solid solutions is assumed...
class ferropericlase(HelperSolidSolution):
def __init__(self, fe_num):
endmembers = [periclase, wustite]
molar_fractions = [1. - fe_num, 0.0 + fe_num]
HelperSolidSolution.__init__(self, endmembers, molar_fractions)
class perovskite(HelperSolidSolution):
def __init__(self, fe_num):
endmembers = [mg_perovskite, fe_perovskite]
molar_fractions = [1. - fe_num, 0.0 + fe_num]
HelperSolidSolution.__init__(self, endmembers, molar_fractions)
# define the pressures
pressure = np.linspace(28.0e9, 129e9, 25)
# seismic model for comparison:
# pick from .prem() .slow() .fast()
# (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM()
depths = seismic_model.depth(pressure)
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
# define temperatures
temperature_bs = burnman.geotherm.brown_shankland(depths)
temperature_an = burnman.geotherm.anderson(depths)
# pure perovskite
perovskitite = burnman.Composite([perovskite(0.06)], [1.0])
perovskitite.set_method(method)
# pure periclase
periclasite = burnman.Composite([ferropericlase(0.21)], [1.0])
periclasite.set_method(method)
# pyrolite (80% perovskite)
pyrolite = burnman.Composite([perovskite(0.06), ferropericlase(0.21)],
[0.834, 0.166])
pyrolite.set_method(method)
# preferred mixture?
amount_perovskite = 0.92
preferred_mixture = burnman.Composite(
[perovskite(0.06), ferropericlase(0.21)],
[amount_perovskite, 1.0 - amount_perovskite])
preferred_mixture.set_method(method)
mat_rho_1, mat_vp_1, mat_vs_1, mat_vphi_1, mat_K_1, mat_G_1 = \
perovskitite.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], seis_p, temperature_bs)
mat_rho_2, mat_vp_2, mat_vs_2, mat_vphi_2, mat_K_2, mat_G_2 = \
periclasite.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], seis_p, temperature_bs)
mat_rho_3, mat_vp_3, mat_vs_3, mat_vphi_3, mat_K_3, mat_G_3 = \
pyrolite.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], seis_p, temperature_bs)
mat_rho_4, mat_vp_4, mat_vs_4, mat_vphi_4, mat_K_4, mat_G_4 = \
preferred_mixture.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], seis_p, temperature_bs)
# HERE IS THE STEP WITH THE INCORRECT MIXING ###
# comment this out to have correct phase averaging, leave it in to have
# incorrect phase averaging
mat_vs_3_wr = (0.5 * ((0.834 * mat_vs_1 + 0.166 * mat_vs_2)
+ np.ones_like(mat_vs_1)
/ (0.834 / mat_vs_1 + 0.166 / mat_vs_2)))
mat_vs_4_wr = (0.5 * ((0.92 * mat_vs_1 + 0.08 * mat_vs_2)
+ np.ones_like(mat_vs_1)
/ (0.92 / mat_vs_1 + 0.08 / mat_vs_2)))
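    # Note (added for clarity): the two "_wr" lines above apply a Voigt-Reuss-Hill
    # style average directly to the shear velocities -- half the sum of the
    # volume-weighted arithmetic and harmonic means -- which is the incorrect
    # shortcut this example demonstrates. The correct route (mat_vs_3, mat_vs_4)
    # averages the elastic moduli via Composite.evaluate() and only then converts
    # to velocity, as labelled "V.-R.-H. on moduli" in the left-hand panel.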
plt.subplot(1, 2, 2)
plt.ylim(5.2, 7.4)
plt.xlim(25, 135)
# fig1 = mpimg.imread('input_figures/murakami_book_chapter.png')
# plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto')
plt.plot(seis_p / 1.e9, seis_vs / 1.e3, color='k', linestyle='-',
marker='None', markerfacecolor='w', markersize=4, label='PREM',
linewidth=3.0, mew=1.5)
plt.plot(seis_p / 1.e9, mat_vs_1 / 1.e3, color=colors.color(3),
marker='v', markerfacecolor=colors.color(3), markersize=4,
markeredgecolor=colors.color(3), markevery=2, linewidth=1.5,
label='perovskite')
plt.plot(seis_p / 1.e9, mat_vs_2 / 1.e3, color=colors.color(1),
linestyle='-', linewidth=1.5, marker='^',
markerfacecolor=colors.color(1), markersize=4,
markeredgecolor=colors.color(1), markevery=2, label='periclase')
plt.plot(seis_p / 1.e9, mat_vs_4_wr / 1.e3, color=colors.color(4),
dashes=dashstyle3, linewidth=1.5, marker='o',
markerfacecolor=colors.color(4), markersize=4,
markeredgecolor=colors.color(4), markevery=2, label='92\\% pv')
plt.plot(seis_p / 1.e9, mat_vs_3_wr / 1.e3, color='g', linestyle='-',
dashes=dashstyle2, linewidth=1.5, marker='o', markerfacecolor='w',
markersize=4, markeredgecolor='g', markevery=2, label='83\\% pv')
plt.legend(loc='lower right', prop={'size': 12})
plt.title("Phase average on velocities")
plt.xlabel("Pressure (GPa)")
plt.subplot(1, 2, 1)
plt.ylim(5.2, 7.4)
plt.xlim(25, 135)
# fig1 = mpimg.imread('input_figures/murakami_book_chapter.png')
# plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto')
plt.plot(seis_p / 1.e9, seis_vs / 1.e3, color='k', linestyle='-',
marker='None', markerfacecolor='w', markersize=4,
label='PREM', linewidth=3.0, mew=1.5)
plt.plot(seis_p / 1.e9, mat_vs_1 / 1.e3, color=colors.color(3),
marker='v', markerfacecolor=colors.color(3), markersize=4,
markeredgecolor=colors.color(3), markevery=2, linewidth=1.5,
label='perovskite')
plt.plot(seis_p / 1.e9, mat_vs_2 / 1.e3, color=colors.color(1),
linestyle='-', linewidth=1.5, marker='^',
markerfacecolor=colors.color(1), markersize=4,
markeredgecolor=colors.color(1), markevery=2, label='periclase')
plt.plot(seis_p / 1.e9, mat_vs_4 / 1.e3, color=colors.color(4),
dashes=dashstyle3, linewidth=1.5, marker='o',
markerfacecolor=colors.color(4), markersize=4,
markeredgecolor=colors.color(4), markevery=2, label='92\\% pv')
plt.plot(seis_p / 1.e9, mat_vs_3 / 1.e3, color='g', linestyle='-',
dashes=dashstyle2, linewidth=1.5, marker='o',
markerfacecolor='w', markersize=4, markeredgecolor='g',
markevery=2, label='83\\% pv')
plt.title(" V.-R.-H. on moduli")
plt.xlabel("Pressure (GPa)")
plt.ylabel("Shear Velocity Vs (km/s)")
plt.tight_layout()
if "RUNNING_TESTS" not in globals():
plt.savefig("example_incorrect_averaging.pdf", bbox_inches='tight')
plt.show()
| gpl-2.0 |
HBNLdev/DataStore | db/file_handling.py | 1 | 51898 | '''Handling HBNL files
'''
import os
import shutil
import subprocess
from collections import OrderedDict
from datetime import datetime
import h5py
import numpy as np
import pandas as pd
import db.database as D
from .utils.compilation import join_allcols, extract_session_fromuID, join_ufields, column_split
from .utils.filename_parsing import parse_filename, system_shorthands
from .utils.records import unflatten_dict
def next_file_with_base(directory, base, ext):
''' given directory, base filename, and extension,
return the next file of its type '''
files = [f for f in os.listdir(directory) if base in f and '.' + ext in f]
if files:
numbers = [int(os.path.splitext(f)[0].split('_')[-1]) for f in files]
next_num = max(numbers) + 1
else:
next_num = 1
next_file = base + '_' + str(next_num) + '.' + ext
return next_file
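# Illustrative usage sketch (added; not part of the original module). The base
# name 'picked_files' and extension 'lst' are arbitrary examples; the demo runs
# against a throwaway temporary directory and is never called at import time.
def _demo_next_file_with_base():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        # two existing files with numeric suffixes 1 and 2
        open(os.path.join(tmp, 'picked_files_1.lst'), 'w').close()
        open(os.path.join(tmp, 'picked_files_2.lst'), 'w').close()
        # the next file of this type is therefore numbered 3
        return next_file_with_base(tmp, 'picked_files', 'lst')  # 'picked_files_3.lst'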
##############################
##
# EEG
##
##############################
class RestingDAT:
''' represents David's files containing estimates of resting state power for various frequency bands
and bipolar derived channels '''
bands = ['3-5', '5-7', '7-9', '9-12', '12-16', '16-20', '20-28']
channels = ['FP1-F3', 'FP2-F4', 'FP1-F7', 'FP2-F8', 'F7-F3', 'F8-F4', 'F7-T7', 'F8-T8', 'F3-C3', 'F4-C4',
'FZ-CZ', 'CZ-PZ', 'T7-C3', 'T8-C4', 'T7-P7', 'T8-P8', 'C3-P3', 'C4-P4', 'P7-P3', 'P8-P4', 'P7-O1',
'P8-O2', 'P3-O1', 'P4-O2', 'PZ-O1', 'PZ-O2', 'O1-O2', 'CZ-C3', 'CZ-C4', 'PZ-P3', 'PZ-P4', 'F7-C3',
'F8-C4', 'FP1-FP2', 'F3-FZ', 'FZ-F4', ]
columns = ['uID', 'age']
for band in bands:
for chan in channels:
columns.append('_'.join((chan, band)))
def __init__(s, path):
s.path = path
def ns_to_dataframe(s):
file_df = pd.read_csv(s.path, delim_whitespace=True, header=None)
file_df.columns = s.columns
file_df['session'] = file_df['uID'].apply(extract_session_fromuID)
file_df['ID'] = file_df['uID'].apply(column_split, args=[1, '_'])
file_df.set_index(['ID', 'session'], drop=False, inplace=True)
file_df['uID_hbnl'] = file_df['uID']
file_df['uID'] = file_df[['ID', 'session']].apply(join_allcols, axis=1)
s.file_df = file_df
def mc_to_dataframe(s, session):
s.columns[0] = 'ID'
file_df = pd.read_csv(s.path, delim_whitespace=True, header=None)
file_df.columns = s.columns
file_df['ID'] = file_df['ID'].apply(int).apply(str)
file_df['session'] = session
file_df.set_index(['ID', 'session'], drop=False, inplace=True)
file_df['uID'] = file_df[['ID', 'session']].apply(join_allcols, axis=1)
s.file_df = file_df
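# Illustrative note (added): RestingDAT generates one data column per
# channel/band pair, grouped by frequency band, so the header runs
# 'uID', 'age', 'FP1-F3_3-5', 'FP2-F4_3-5', ... for 2 + 36 * 7 = 254 columns.
def _demo_restingdat_columns():
    # columns is a class attribute, so no file access is needed here
    return RestingDAT.columns[:4]  # ['uID', 'age', 'FP1-F3_3-5', 'FP2-F4_3-5']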
class CNTH1_File:
def __init__(s, filepath):
s.filepath = filepath
s.filename = os.path.split(filepath)[1]
s.file_info = parse_filename(s.filename)
def parse_fileDB(s):
''' prepare the data field for the database object '''
s.data = {}
s.data.update(s.file_info)
s.data.update({'filepath': s.filepath})
s.data['ID'] = s.data['id']
def read_trial_info(s, nlines=-1):
h5header = subprocess.check_output(
['/opt/bin/print_h5_header', s.filepath])
head_lines = h5header.decode().split('\n')
hD = {}
for L in head_lines[:nlines]:
if L[:8] == 'category':
cat = L[9:].split('"')[1]
hD[cat] = {}
curD = hD[cat]
elif L[:len(cat)] == cat:
subcat = L.split(cat)[1].strip()
hD[cat][subcat] = {}
curD = hD[cat][subcat]
else:
parts = L.split(';')
var = parts[0].split('"')[1]
val = parse_maybe_numeric(parts[1].split(',')[0].strip())
curD[var] = val
s.trial_info = hD
def parse_maybe_numeric(st):
proc = st.replace('-', '')
dec = False
if '.' in st:
dec = True
        proc = proc.replace('.', '')  # strip the '.' from the already dash-stripped string
if proc.isnumeric():
if dec:
return float(st)
else:
return int(st)
return st
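# Illustrative examples (added) of the coercion rules above, on hypothetical
# header values:
def _demo_parse_maybe_numeric():
    assert parse_maybe_numeric('12') == 12      # integer string -> int
    assert parse_maybe_numeric('7.5') == 7.5    # decimal string -> float
    assert parse_maybe_numeric('nt') == 'nt'    # anything else passes through unchanged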
def extract_case_tuple(path):
''' given a path to an .avg.h1 file, extract a case tuple for comparison '''
f = h5py.File(path, 'r')
case_info = f['file']['run']['case']['case'][:]
case_lst = []
for case in case_info:
index = case[0][0]
type_letter = case[-3][0].decode()
type_word = case[-2][0].decode()
case_lst.append((index, type_letter, type_word))
case_tup = tuple(case_lst)
return case_tup
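# Illustrative note (added): the returned tuple holds one (index, letter, word)
# triple per case, e.g. ((1, 'A', 'Antonym'), (2, 'J', 'Jumble'), ...) for an
# ant run -- the same shape as the entries of MT_File.ant_cases_types_lk below.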
class AVGH1_File(CNTH1_File):
''' represents *.avg.h1 files, mostly for the behavioral info inside '''
min_resptime = 100
trial_columns = ['Trial', 'Case Index', 'Response Code', 'Stimulus', 'Correct', 'Omitted', 'Artifact Present',
'Accepted', 'Max Amp in Threshold Window', 'Threshold', 'Reaction Time (ms)', 'Time (s)']
def __init__(s, filepath):
s.filepath = filepath
CNTH1_File.__init__(s, filepath)
path_parts = filepath.split(os.path.sep)
system_letters = path_parts[-2][:2]
s.file_info['system'] = system_shorthands[system_letters]
s.data = {'uID': s.file_info['id'] + '_' + s.file_info['session']}
def fix_ant(s):
case_tup = extract_case_tuple(s.filepath)
try:
ind = MT_File.ant_cases_types_lk.index(case_tup)
        except ValueError:  # list.index raises ValueError when the case tuple is not found
print('case info unexpected')
return
if ind > 0:
for type_ind in range(4):
s.case_dict[type_ind]['code'] = 'JPAW'[type_ind]
def parse_behav_forDB(s, general_info=False):
''' wrapper for main function that also prepares for DB insert '''
# s.data = {}
# experiment specific stuff
if s.file_info['system'] == 'masscomp':
s.load_data_mc()
if s.file_info['experiment'] == 'ant':
s.fix_ant()
s.calc_results_mc()
elif s.file_info['system'] == 'neuroscan':
s.load_data()
s.parse_seq()
s.calc_results() # puts behavioral results in s.results
else:
print('system not recognized')
return
s.data[s.exp] = unflatten_dict(s.results)
s.data[s.exp]['filepath'] = s.filepath
s.data[s.exp]['run'] = s.file_info.pop('run')
s.data[s.exp]['version'] = s.file_info.pop('version')
# ID-session specific stuff
s.data.update(s.file_info)
s.data['ID'] = s.data['id']
# s.data['uID'] = s.data['ID']+'_'+s.data['session']
del s.data['experiment']
if not general_info:
s.data = {s.exp: s.data[s.exp], 'uID': s.data['uID']}
def calc_results(s):
''' calculates accuracy and reaction time from the event table '''
results = {}
for t, t_attrs in s.case_dict.items():
nm = t_attrs['code']
stmevs = s.ev_df['type_seq'] == t
if t_attrs['corr_resp'] == 0: # no response required
correct = s.ev_df.loc[stmevs, 'correct']
results[nm + '_acc'] = np.sum(correct) / np.sum(stmevs)
continue
# response required
rspevs = (np.roll(stmevs, 1)) & (s.ev_df['resp_seq'] != 0)
correct_late = rspevs & (s.ev_df['correct'])
correct = correct_late & ~(s.ev_df['late'])
results[nm + '_acc'] = np.sum(correct) / np.sum(stmevs)
results[nm + '_accwithlate'] = np.sum(correct_late) / np.sum(stmevs)
results[nm + '_medianrt'] = s.ev_df.loc[correct, 'rt'].median()
results[nm + '_medianrtwithlate'] = \
s.ev_df.loc[correct_late, 'rt'].median()
# for certain experiments, keep track of noresp info
if s.file_info['experiment'] in ['ant', 'ern', 'stp']:
noresp = s.ev_df.loc[stmevs, 'noresp']
results[nm + '_noresp'] = np.sum(noresp) / np.sum(stmevs)
results[nm + '_accwithresp'] = np.sum(correct) / \
(np.sum(stmevs) - np.sum(noresp))
results[nm + '_accwithrespwithlate'] = np.sum(correct_late) / \
(np.sum(stmevs) - np.sum(noresp))
# this part logs the median reaction time for each type of response
# (i.e. both for correct and incorrect responses)
for rc in s.acceptable_resps:
tmp_df = s.ev_df[(s.ev_df['resp_seq'] == rc) &
~(s.ev_df['early']) & ~(s.ev_df['errant'])]
results[nm + str(rc) + '_medianrtwithlate'] = \
tmp_df['rt'].median()
tmp_df2 = tmp_df[~tmp_df['late']]
results[nm + str(rc) + '_medianrt'] = tmp_df2['rt'].median()
s.results = results
def calc_results_mc(s):
results = {}
for t, t_attrs in s.case_dict.items():
nm = t_attrs['code']
case_trials = s.trial_df[s.trial_df['Stimulus'] == t]
try:
results[nm + '_acc'] = sum(case_trials['Correct']) / case_trials.shape[0]
except ZeroDivisionError:
results[nm + '_acc'] = np.nan
if t_attrs['corr_resp'] != 0: # response required
case_trials.drop(case_trials[~case_trials['Correct']].index, inplace=True)
results[nm + '_medianrt'] = case_trials['Reaction Time (ms)'].median()
s.results = results
def load_data(s):
''' prepare needed data from the h5py pointer '''
        f = h5py.File(s.filepath, 'r')  # open read-only
s.exp = f['file/experiment/experiment'][0][-3][0].decode()
s.case_dict = {}
for column in f['file/run/case/case']:
s.case_dict.update({column[3][0]: {'code': column[-3][0].decode(),
'descriptor': column[-2][0].decode(),
'corr_resp': column[4][0],
'resp_win': column[9][0]}})
s.acceptable_resps = set(v['corr_resp'] for v in s.case_dict.values())
s.type_seq = np.array([col[1][0] for col in f['file/run/event/event']])
s.resp_seq = np.array([col[2][0] for col in f['file/run/event/event']])
s.time_seq = np.array([col[-1][0] for col in f['file/run/event/event']])
def load_data_mc(s):
''' prepare needed data from the h5py pointer for a masscomp file '''
        f = h5py.File(s.filepath, 'r')  # open read-only
s.exp = f['file/experiment/experiment'][0][-3][0].decode()
s.case_dict = {}
for column in f['file/run/case/case']:
s.case_dict.update({column[3][0]: {'code': column[-3][0].decode(),
'descriptor': column[-2][0].decode(),
'corr_resp': column[4][0],
'resp_win': column[9][0]}})
base_trialarray = f['file/run/trial/trial'][:]
np_array = np.array([[elem[0] for elem in row] for row in base_trialarray])
s.trial_df = pd.DataFrame(np_array)
s.trial_df.iloc[:, :4] = s.trial_df.iloc[:, :4].applymap(int)
s.trial_df.iloc[:, 4:8] = s.trial_df.iloc[:, 4:8].applymap(bool)
s.trial_df.iloc[:, 8:12] = s.trial_df.iloc[:, 8:12].applymap(float)
s.trial_df.columns = s.trial_columns
s.trial_df.set_index('Trial', inplace=True)
def parse_seq(s):
''' parse the behavioral sequence and create a dataframe containing
the event table '''
bad_respcodes = ~np.in1d(s.resp_seq, list(range(0, 9)))
# bad_respcodes = ~np.in1d(s.resp_seq, [0, 1, 8]) # if strict about acceptable responses
if np.any(bad_respcodes):
s.resp_seq[bad_respcodes] = 0
nonresp_respcodes = (s.resp_seq != 0) & (s.type_seq != 0)
if np.any(nonresp_respcodes):
s.resp_seq[nonresp_respcodes] = 0
s.ev_len = len(s.type_seq)
s.errant = np.zeros(s.ev_len, dtype=bool)
s.early = np.zeros(s.ev_len, dtype=bool)
s.late = np.zeros(s.ev_len, dtype=bool)
s.correct = np.zeros(s.ev_len, dtype=bool)
s.noresp = np.zeros(s.ev_len, dtype=bool)
s.type_descriptor = []
# this is the main algorithm applied to the event sequence
s.parse_alg()
event_interval_ms = np.concatenate([[0], np.diff(s.time_seq) * 1000])
rt = np.empty_like(event_interval_ms) * np.nan
rt[(s.resp_seq != 0) & ~s.errant] = \
event_interval_ms[(s.resp_seq != 0) & ~s.errant]
s.type_descriptor = np.array(s.type_descriptor, dtype=np.object_)
dd = {'type_seq': s.type_seq, 'type_descriptor': s.type_descriptor,
'correct': s.correct, 'rt': rt, 'resp_seq': s.resp_seq,
'noresp': s.noresp,
'errant': s.errant, 'early': s.early, 'late': s.late,
'time_seq': s.time_seq, 'event_intrvl_ms': event_interval_ms}
ev_df = pd.DataFrame(dd)
# reorder columns
col_order = ['type_seq', 'type_descriptor', 'correct', 'rt', 'resp_seq',
'noresp', 'errant', 'early', 'late', 'time_seq', 'event_intrvl_ms']
ev_df = ev_df[col_order]
s.ev_df = ev_df
def parse_alg(s):
''' algorithm applied to event structure. the underlying philosophy is:
each descriptive of an event is false unless proven true '''
for ev, t in enumerate(s.type_seq):
if t == 0: # some kind of response
prev_t = s.type_seq[ev - 1]
if ev == 0 or prev_t not in s.case_dict:
# first code is response, previous event is also response,
# or previous event is unrecognized
s.type_descriptor.append('rsp_err')
s.errant[ev] = True
continue
else:
# early / late responses
# early is considered incorrect, while late can be correct
tmp_rt = (s.time_seq[ev] - s.time_seq[ev - 1]) * 1000
if tmp_rt > s.case_dict[prev_t]['resp_win']:
s.late[ev] = True
elif tmp_rt < s.min_resptime:
s.early[ev] = True
s.type_descriptor.append('rsp_early')
continue
# interpret correctness (could have been late)
if s.resp_seq[ev] == s.case_dict[prev_t]['corr_resp']:
s.type_descriptor.append('rsp_correct')
s.correct[ev] = True
continue
else:
s.type_descriptor.append('rsp_incorrect')
continue
else: # some kind of stimulus
if t in s.case_dict:
s.type_descriptor.append(s.exp + '_' + s.case_dict[t]['code'])
# interpret correctness
if ev + 1 == s.ev_len: # if the last event
# only correct if correct resp is no response
if s.case_dict[t]['corr_resp'] == 0:
s.correct[ev] = True
else: # if not the last event
# only considered correct if following resp was correct
if s.resp_seq[ev + 1] == s.case_dict[t]['corr_resp']:
s.correct[ev] = True
# if incorrect, note if due to response omission
elif s.case_dict[t]['corr_resp'] != 0 and \
s.resp_seq[ev + 1] == 0:
s.noresp[ev] = True
else:
s.type_descriptor.append('stm_unknown')
class MT_File:
''' manually picked files from eeg experiments
initialization only parses the filename, call parse_file to load data
'''
columns = ['subject_id', 'experiment', 'version', 'gender', 'age', 'case_num',
'electrode', 'peak', 'amplitude', 'latency', 'reaction_time']
cases_peaks_by_experiment = {'aod': {(1, 'tt'): ['N1', 'P3'],
(2, 'nt'): ['N1', 'P2']
},
'vp3': {(1, 'tt'): ['N1', 'P3'],
(2, 'nt'): ['N1', 'P3'],
(3, 'nv'): ['N1', 'P3']
},
'ant': {(1, 'a'): ['N4', 'P3'],
(2, 'j'): ['N4', 'P3'],
(3, 'w'): ['N4', 'P3'],
# (4, 'p'): ['P3', 'N4']
}
}
# string for reference
data_structure = '{(case#,peak):{electrodes:(amplitude,latency),reaction_time:time} }'
ant_cases_types_lk = [((1, 'A', 'Antonym'),
(2, 'J', 'Jumble'),
(3, 'W', 'Word'),
(4, 'P', 'Prime')),
((1, 'T', 'jumble'),
(2, 'T', 'prime'),
(3, 'T', 'antonym'),
(4, 'T', 'other')),
((1, 'T', 'jumble'),
(2, 'T', ' prime'),
(3, 'T', ' antonym'),
(4, 'T', ' other')),
((1, 'T', 'jumble'),
(2, 'T', 'prime'),
(3, 'T', 'antonym'),
(4, 'T', 'word'))]
case_fields = ['case_num', 'case_type', 'descriptor']
ant_case_convD = {0: {1: 1, 2: 2, 3: 3, 4: 4}, # Translates case0 to each case
1: {1: 3, 2: 1, 3: 4, 4: 2},
2: {1: 3, 2: 1, 3: 4, 4: 2},
3: {1: 3, 2: 1, 3: 4, 4: 2}}
# 4:{1:1,2:2,3:3,4:4} }
case_nums2names = {'aod': {1: 't', 2: 'nt'},
'vp3': {1: 't', 2: 'nt', 3: 'nv'},
'ant': {1: 'a', 2: 'j', 3: 'w', 4: 'p'},
'cpt': {1: 'g', 2: 'c', 3: 'cng',
4: 'db4ng', 5: 'ng', 6: 'dad'},
'stp': {1: 'c', 2: 'i'},
}
query_fields = ['id', 'session', 'experiment']
def normAntCase(s):
case_nums = tuple( sorted([ k for k in s.header['cases_peaks'].keys() ]) )
if case_nums == (1,3,4): # Inconsistent ordering
with open('/active_projects/db/logs/134_cases_HBNL4.log','a') as logf:
logf.write(s.fullpath+'\n')
return {1:2,2:4,3:1,4:3}
else:
query = {k: v for k, v in s.file_info.items() if k in s.query_fields}
doc = D.Mdb['avgh1s'].find_one(query)
avgh1_path = doc['filepath']
case_tup = extract_case_tuple(avgh1_path)
case_type = MT_File.ant_cases_types_lk.index(case_tup)
return MT_File.ant_case_convD[case_type]
def __init__(s, filepath):
s.fullpath = filepath
s.filename = os.path.split(filepath)[1]
s.header = {'cases_peaks': {}}
s.parse_fileinfo()
s.data = dict()
s.data['uID'] = s.file_info['id'] + '_' + s.file_info['session']
if s.file_info['experiment'] == 'ant':
s.normed_cases_calc()
s.parse_header()
def parse_fileinfo(s):
s.file_info = parse_filename(s.filename)
def __repr__(s):
return '<mt-file object ' + str(s.file_info) + ' >'
def parse_header(s):
of = open(s.fullpath, 'r')
reading_header = True
s.header_lines = 0
cases = []
while reading_header:
file_line = of.readline()
if len(file_line) < 2 or file_line[0] != '#':
reading_header = False
continue
s.header_lines += 1
line_parts = [pt.strip() for pt in file_line[1:-1].split(';')]
if 'nchans' in line_parts[0]:
s.header['nchans'] = int(line_parts[0].split(' ')[1])
elif 'case' in line_parts[0]:
cs_pks = [lp.split(' ') for lp in line_parts]
if cs_pks[1][0] != 'npeaks':
s.header['problems'] = True
else:
case = int(cs_pks[0][1])
peak = int(cs_pks[1][1])
# if 'normed_cases' in dir(s):
# case = s.normed_cases[case]
s.header['cases_peaks'][case] = peak
cases.append( case )
s.header['case_tup'] = tuple(sorted(list(set(cases))))
s.normed_cases_calc()
of.close()
def normed_cases_calc(s):
try:
norm_dict = s.normAntCase()
s.normed_cases = norm_dict
except:
s.normed_cases = MT_File.ant_case_convD[0]
s.norm_fail = True
def parse_fileDB(s, general_info=False):
s.parse_file()
exp = s.file_info['experiment']
ddict = {}
        for k in s.mt_data:  # keys are (case_num, peak) tuples
case_convdict = s.case_nums2names[exp]
case = case_convdict[int(k[0])]
peak = k[1]
inner_ddict = {}
            for chan, amp_lat in s.mt_data[k].items():  # electrodes, plus the reaction_time entry
if type(amp_lat) is tuple: # if amp / lat tuple
inner_ddict.update(
{chan: {'amp': float(amp_lat[0]),
'lat': float(amp_lat[1])}}
)
elif chan == 'reaction_time':
inner_ddict['rt'] = float(amp_lat)
ddict[case + '_' + peak] = inner_ddict
ddict['filepath'] = s.fullpath
ddict['run'] = s.file_info['run']
ddict['version'] = s.file_info['version']
if general_info:
s.data.update(s.file_info)
del s.data['experiment']
del s.data['run']
del s.data['version']
s.data['ID'] = s.data['id']
s.data[exp] = ddict
def parse_file(s):
of = open(s.fullpath, 'r')
data_lines = of.readlines()[s.header_lines:]
of.close()
s.mt_data = OrderedDict()
for L in data_lines:
Ld = {c: v for c, v in zip(s.columns, L.split())}
if 'normed_cases' in dir(s):
Ld['case_num'] = s.normed_cases[int(Ld['case_num'])]
key = (int(Ld['case_num']), Ld['peak'])
if key not in s.mt_data:
s.mt_data[key] = OrderedDict()
s.mt_data[key][Ld['electrode'].upper()] = (
Ld['amplitude'], Ld['latency'])
if 'reaction_time' not in s.mt_data[key]:
s.mt_data[key]['reaction_time'] = Ld['reaction_time']
return
def parse_fileDF(s):
s.dataDF = pd.read_csv(s.fullpath, delim_whitespace=True,
comment='#', names=s.columns)
def check_peak_order(s):
''' Pandas Dataframe based '''
if 'dataDF' not in dir(s):
s.parse_fileDF()
if 'normed_cases' in dir(s):
case_lk = {v: k for k, v in s.normed_cases.items()}
probs = {}
# peaks by case number
case_peaks = {k[0]: v for k, v in
s.cases_peaks_by_experiment[s.file_info['experiment']].items()}
cols_use = ['electrode', 'latency']
for case in s.dataDF['case_num'].unique():
cDF = s.dataDF[s.dataDF['case_num'] == case]
if 'normed_cases' in dir(s):
case_norm = case_lk[case]
else:
case_norm = case
if case_norm in case_peaks:
pk = case_peaks[case_norm][0]
ordDF = cDF[cDF['peak'] == pk][cols_use]
ordDF.rename(columns={'latency': 'latency_' + pk}, inplace=True)
peak_track = [pk]
delta_cols = []
if case in case_peaks:
for pk in case_peaks[case][1:]:
pkDF = cDF[cDF['peak'] == pk][cols_use]
pkDF.rename(columns={'latency': 'latency_' + pk}, inplace=True)
# return (ordDF, pkDF)
ordDF = ordDF.join(pkDF, on='electrode', rsuffix=pk)
delta_col = pk + '_' + peak_track[-1] + '_delta'
ordDF[delta_col] = \
ordDF['latency_' + pk] - ordDF['latency_' + peak_track[-1]]
peak_track.append(pk)
delta_cols.append(delta_col)
for dc in delta_cols:
wrong_order = ordDF[ordDF[dc] < 0]
if len(wrong_order) > 0:
case_name = s.case_nums2names[s.file_info['experiment']][case_norm]
probs[case_name + '_' + dc] = list(wrong_order['electrode'])
if len(probs) == 0:
return True
else:
return probs
def check_max_latency(s, latency_thresh=1000):
''' Pandas Dataframe based '''
if 'dataDF' not in dir(s):
s.parse_fileDF()
high_lat = s.dataDF[s.dataDF['latency'] > latency_thresh]
if len(high_lat) == 0:
return True
else:
            return high_lat[['case_num', 'electrode', 'peak', 'amplitude', 'latency']]
def build_header(s):
if 'mt_data' not in dir(s):
s.parse_file()
cases_peaks = list(s.mt_data.keys())
cases_peaks.sort()
header_data = OrderedDict()
for cp in cases_peaks:
if cp[0] not in header_data:
header_data[cp[0]] = 0
header_data[cp[0]] += 1
# one less for reaction_time
s.header_text = '#nchans ' + \
str(len(s.mt_data[cases_peaks[0]]) - 1) + '\n'
for cs, ch_count in header_data.items():
s.header_text += '#case ' + \
str(cs) + '; npeaks ' + str(ch_count) + ';\n'
print(s.header_text)
def build_file(s):
pass
def check_header_for_experiment(s):
expected = s.cases_peaks_by_experiment[s.file_info['experiment']]
if len(expected) != len(s.header['cases_peaks']):
return 'Wrong number of cases'
case_problems = []
for pknum_name, pk_list in expected.items():
if s.header['cases_peaks'][pknum_name[0]] != len(pk_list):
case_problems.append(
'Wrong number of peaks for case ' + str(pknum_name))
if case_problems:
return str(case_problems)
return True
def check_peak_identities(s):
if 'mt_data' not in dir(s):
s.parse_file()
for case, peaks in s.cases_peaks_by_experiment[s.file_info['experiment']].items():
if (case[0], peaks[0]) not in s.mt_data:
return False, 'case ' + str(case) + ' missing ' + peaks[0] + ' peak'
if (case[0], peaks[1]) not in s.mt_data:
return False, 'case ' + str(case) + ' missing ' + peaks[1] + ' peak'
return True
def check_peak_orderNmax_latency(s, latency_thresh=1000):
if 'mt_data' not in dir(s):
s.parse_file()
for case, peaks in s.cases_peaks_by_experiment[s.file_info['experiment']].items():
try:
latency1 = float(s.mt_data[(case[0], peaks[0])]['FZ'][1])
latency2 = float(s.mt_data[(case[0], peaks[1])]['FZ'][1])
except:
print(s.fullpath + ': ' +
str(s.mt_data[(case[0], peaks[0])].keys()))
if latency1 > latency_thresh:
return (
False,
str(case) + ' ' + peaks[0] + ' ' + 'exceeds latency threshold (' + str(latency_thresh) + 'ms)')
if latency2 > latency_thresh:
return (
False,
str(case) + ' ' + peaks[1] + ' ' + 'exceeds latency threshold (' + str(latency_thresh) + 'ms)')
if latency1 > latency2:
return False, 'Wrong order for case ' + str(case)
return True
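# Illustrative sketch (added): shape of MT_File.mt_data after parse_file(),
# following the data_structure string defined on the class. Case numbers,
# peaks, electrodes and values below are hypothetical; amplitudes and
# latencies are kept as strings, exactly as parse_file() stores them.
def _example_mt_data():
    return {
        (1, 'N1'): {'FZ': ('-4.13', '112.5'), 'CZ': ('-3.87', '110.0'),
                    'reaction_time': '351.2'},
        (1, 'P3'): {'FZ': ('6.02', '348.0'), 'CZ': ('6.91', '344.5'),
                    'reaction_time': '351.2'},
    }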
def move_picked_files_to_processed(from_base, from_folders, working_directory, filter_list=[], do_now=False):
''' utility for moving processed files - places files in appropriate folders based on filenames
inputs:
from_base - folder containing all from_folders
from_folders - list of subfolders
working_directory - folder to store delete list (/active_projects can only be modified by exp)
filter_list - a list by which to limit the files
do_now - must be set to true to execute - by default, just a list of proposed copies is returned
'''
to_base = '/processed_data/mt-files/'
to_copy = []
counts = {'non coga': 0, 'total': 0,
'to move': 0, 'masscomp': 0, 'neuroscan': 0}
if do_now:
delete_file = open(os.path.join(working_directory,
next_file_with_base(working_directory, 'picked_files_copied_to_processed',
'lst')), 'w')
for folder in from_folders:
for reject in [False, True]:
from_folder = os.path.join(from_base, folder)
if reject:
from_folder += os.path.sep + 'reject'
if not os.path.exists(from_folder):
print(from_folder + ' Does Not Exist')
continue
print('checking: ' + from_folder)
files = [f for f in os.listdir(from_folder) if not os.path.isdir(
os.path.join(from_folder, f))]
if filter_list:
print(len(files))
files = [f for f in files if any(
[s in f for s in filter_list])]
print(len(files))
for file in files:
counts['total'] += 1
if not ('.lst' in file or '.txt' in file or '_list' in file):
try:
file_info = parse_filename(file)
if 'subjects' in file_info['site']:
counts['non coga'] += 1
if file_info['system'] == 'masscomp':
counts['masscomp'] += 1
type_short = 'mc'
session_path = None
else:
counts['neuroscan'] += 1
type_short = 'ns'
session_path = file_info['session'] + '-session'
to_path = to_base + file_info['experiment'] + os.path.sep + file_info[
'site'] + os.path.sep + type_short + os.path.sep
if session_path:
to_path += session_path + os.path.sep
if reject:
to_path += 'reject' + os.path.sep
to_copy.append(
(from_folder + os.path.sep + file, to_path))
counts['to move'] += 1
except:
print('uninterpretable file: ' + file)
print(str(counts['total']) + ' total (' + str(counts['masscomp']) + ' masscomp, ' + str(
counts['neuroscan']) + ' neuroscan) ' + str(counts['to move']) + ' to move')
print('total non coga: ' + str(counts['non coga']))
if do_now:
for cf_dest in to_copy:
delete_file.write(cf_dest[0] + '\n')
if not os.path.exists(cf_dest[1]):
os.makedirs(cf_dest[1])
shutil.copy2(cf_dest[0], cf_dest[1])
delete_file.close()
return to_copy
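# Illustrative usage sketch (added): a dry run that only lists the proposed
# copies. The folder names and subject ID are hypothetical; do_now stays False,
# so nothing is copied and no delete list is written.
def _demo_move_picked_files():
    proposed = move_picked_files_to_processed(
        '/active_projects/picked',   # hypothetical from_base
        ['ant', 'vp3'],              # hypothetical subfolders to scan
        '/tmp/work',                 # working directory for the delete list
        filter_list=['12345678'],    # restrict to one hypothetical subject ID
        do_now=False)                # list only; pass True to actually copy
    return proposed                  # list of (source_file, destination_dir) tuples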
class ERO_CSV:
''' Compilations in processed data '''
columns = ['ID', 'session', 'trial', 'F3', 'FZ',
'F4', 'C3', 'CZ', 'C4', 'P3', 'PZ', 'P4']
parameterD = {'e': {'name': 'electrodes',
'values': {'1': 'all',
'4': 'center 9'}
},
'b': {'name': 'baseline type',
'values': {'0': 'none',
'1': 'mean'}},
# 'm':{},
'hi': {'name': 'hi-pass', 'values': 'numeric'},
'lo': {'name': 'lo-pass', 'values': 'numeric'},
'n': {'name': 'minimum trials', 'values': 'numeric'},
's': {'name': 'threshold electrodes', 'values': 'numeric'},
't': {'name': 'threshold level', 'values': 'numeric'},
'u': {'name': 'threshold min time', 'values': 'numeric'},
'v': {'name': 'threshold max time', 'values': 'numeric'},
}
defaults_by_exp = {}
@staticmethod
def parse_parameters(param_string, unknown=set()):
pD = {'unknown': unknown}
for p in param_string.split('-'):
pFlag = p[0]
if pFlag in ERO_CSV.parameterD:
pLookup = ERO_CSV.parameterD[pFlag]
pval = p[1:]
pOpts = pLookup['values']
if pOpts == 'numeric':
pval = int(pval)
else:
pval = pOpts[pval]
pD[pLookup['name']] = pval
else:
pD['unknown'].update(p)
return pD
def __init__(s, filepath):
s.filepath = filepath
s.filename = os.path.split(filepath)[1]
s.parameters = ERO_CSV.defaults_by_exp.copy()
s.parse_fileinfo()
def parse_fileinfo(s):
path_parts = s.filepath.split(os.path.sep)
calc_version = path_parts[2][-3:]
path_parameters = path_parts[-3]
site = path_parts[-2]
s.parameters.update(ERO_CSV.parse_parameters(path_parameters))
file_parts = s.filename.split('_')
exp, case = file_parts[0].split('-')
freq_min, freq_max = [float(v) for v in file_parts[1].split('-')]
time_min, time_max = [int(v) for v in file_parts[2].split('-')]
for param in file_parts[3:-4]:
s.parameters.update(ERO_CSV.parse_parameters(param,
unknown=s.parameters['unknown']))
s.parameters['unknown'] = list(s.parameters['unknown'])
s.parameters['version'] = calc_version
pwr_type = file_parts[-4].split('-')[0]
date = file_parts[-1].split('.')[0]
mod_date = datetime.fromtimestamp(os.path.getmtime(s.filepath))
s.exp_info = {'experiment': exp,
'case': case,
'site': site}
s.dates = {'file date': date,
'mod date': mod_date}
s.phenotype = {'power type': pwr_type,
'frequency min': freq_min,
'frequency max': freq_max,
'time min': time_min,
'time max': time_max}
def read_data(s):
''' prepare the data field for the database object '''
s.data = pd.read_csv(s.filepath, converters={'ID': str},
na_values=['.'], error_bad_lines=False, warn_bad_lines=True)
dup_cols = [col for col in s.data.columns if '.' in col]
s.data.drop(dup_cols, axis=1, inplace=True)
def data_for_file(s):
fileD = s.phenotype.copy()
fileD.update(s.exp_info)
fileD.update(s.parameters)
fileD.update(s.dates)
return fileD
def data_by_sub_ses(s):
''' returns an iterator over rows of data by subject and session including
file and phenotype info '''
s.read_data()
for row in s.data.to_dict(orient='records'):
row.update(s.exp_info)
row.update(s.phenotype)
yield row
def data_forjoin(s):
''' creates unique doc identifying field and renames columns
in preparation for joining with other CSVs '''
s.read_data()
if s.data.shape[1] <= 3:
s.data = pd.DataFrame()
if s.data.empty:
return
s.data['uID'] = s.data.apply(join_ufields, axis=1,
args=[s.exp_info['experiment']])
s.data.drop(['ID', 'session'], axis=1, inplace=True)
s.data.set_index('uID', inplace=True)
bad_list = ['50338099_a_vp3', '50700072_a_vp3', '50174138_e_vp3', '50164139_c_vp3', '50126477_a_vp3']
drop_rows = [uID for uID in s.data.index.values if uID in bad_list]
s.data.drop(drop_rows, inplace=True)
param_str = ''
if 'version' in s.parameters:
param_str += s.parameters['version']
if 'electrodes' in s.parameters:
param_str += '-' + str(s.parameters['electrodes'])
if 'threshold min time' in s.parameters:
param_str += '-' + str(s.parameters['threshold min time'])
rename_dict = {col: '_'.join(['data',
param_str,
s.phenotype['power type'],
s.exp_info['case'],
str(s.phenotype['frequency min']).replace('.', 'p'),
str(s.phenotype['frequency max']).replace('.', 'p'),
str(s.phenotype['time min']),
str(s.phenotype['time max']),
col])
for col in s.data.columns}
s.data.rename(columns=rename_dict, inplace=True)
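# Illustrative sketch (added): how ERO_CSV.parse_parameters expands a parameter
# string taken from a directory name. The string below is hypothetical.
def _demo_ero_parameter_parsing():
    params = ERO_CSV.parse_parameters('e1-b0-n20-t9-v800', unknown=set())
    # params == {'electrodes': 'all', 'baseline type': 'none',
    #            'minimum trials': 20, 'threshold level': 9,
    #            'threshold max time': 800, 'unknown': set()}
    return params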
class ERO_Summary_CSV(ERO_CSV):
''' Compilations in processed data/csv-files-*/ERO-results '''
rem_columns = ['sex', 'EROage', 'POP', 'wave12-race', '4500-race',
'ccGWAS-race', 'COGA11k-race', 'alc_dep_dx', 'alc_dep_ons']
def parse_fileinfo(s):
path_parts = s.filepath.split(os.path.sep)
calc_version = path_parts[2][-3:]
file_parts = s.filename.split('_')
end_parts = file_parts[-1].split('.')
calc_parameters = end_parts[0]
s.parameters.update(ERO_Summary_CSV.parse_parameters(calc_parameters))
exp, case = file_parts[0].split('-')
freq_min, freq_max = [float(v) for v in file_parts[1].split('-')]
time_min, time_max = [int(v) for v in file_parts[2].split('-')]
pwr_type = file_parts[3].split('-')[0]
date = end_parts[1]
mod_date = datetime.fromtimestamp(os.path.getmtime(s.filepath))
s.exp_info = {'experiment': exp,
'case': case}
s.dates = {'file date': date,
'mod date': mod_date}
s.phenotype = {'calc version': calc_version,
'power type': pwr_type,
'frequency min': freq_min,
'frequency max': freq_max,
'time min': time_min,
'time max': time_max}
def read_data(s):
s.data = pd.read_csv(s.filepath, converters={
'ID': str}, na_values=['.'])
s.data.drop(s.rem_columns, axis=1, inplace=True) # drop extra cols
dup_cols = [col for col in s.data.columns if '.' in col]
s.data.drop(dup_cols, axis=1, inplace=True)
def data_3tuple_bulklist(s):
s.read_data()
if s.data.empty:
return
s.data['uID'] = s.data.apply(join_ufields, axis=1,
args=[s.exp_info['experiment']])
for k, v in s.exp_info.items():
s.data[k] = v
for k, v in s.phenotype.items():
s.data[k] = str(v).replace('.', 'p')
s.data = list(s.data.to_dict(orient='records'))
class ERN_extract:
ex_funs = ['extract_ern_react','extract_ern_val']
def __init__(s, filepath):
s.filepath = filepath
s.path = os.path.dirname(s.filepath)
s.path_parts = filepath.split(os.path.sep)
s.filename = os.path.splitext(s.path_parts[-1])[0]
s.file_info = parse_filename(filepath)
        s.data = s.extract_data()
def extract_data(s):
pass
##############################
##
# Neuropsych
##
##############################
class Neuropsych_XML:
''' given filepath to .xml file in /raw_data/neuropsych/, represent it '''
# labels for fields output by david's awk script
cols = ['id',
'dob',
'gender',
'hand',
'testdate',
'sessioncode',
'motiv_avg',
'motiv_cbst',
'motiv_tolt',
'age',
'tolt_3b_mim',
'tolt_3b_mom',
'tolt_3b_em',
'tolt_3b_ao',
'tolt_3b_apt',
'tolt_3b_atoti',
'tolt_3b_ttrti',
'tolt_3b_atrti',
'tolt_4b_mim',
'tolt_4b_mom',
'tolt_4b_em',
'tolt_4b_ao',
'tolt_4b_apt',
'tolt_4b_atoti',
'tolt_4b_ttrti',
'tolt_4b_atrti',
'tolt_5b_mim',
'tolt_5b_mom',
'tolt_5b_em',
'tolt_5b_ao',
'tolt_5b_apt',
'tolt_5b_atoti',
'tolt_5b_ttrti',
'tolt_5b_atrti',
'tolt_tt_mim',
'tolt_tt_mom',
'tolt_tt_em',
'tolt_tt_ao',
'tolt_tt_apt',
'tolt_tt_atoti',
'tolt_tt_ttrti',
'tolt_tt_atrti',
'tolt_3b_otr',
'tolt_4b_otr',
'tolt_5b_otr',
'tolt_tt_otr',
'vst_f_tc',
'vst_f_span',
'vst_f_tcat',
'vst_f_tat',
'vst_b_tc',
'vst_b_span',
'vst_b_tcat',
'vst_b_tat']
# this function needs to be in /usr/bin of the invoking system
func_name = '/opt/bin/do_np_processC'
session_letters = 'abcdefghijklmnop'
npsession_npfollowup = {letter: number for letter, number in
zip(session_letters, range(len(session_letters)))}
def __init__(s, filepath):
s.filepath = filepath
s.path = os.path.dirname(s.filepath)
s.path_parts = filepath.split(os.path.sep)
s.filename = os.path.splitext(s.path_parts[-1])[0]
s.fileparts = s.filename.split('_')
s.site = s.path_parts[-3]
s.subject_id = s.fileparts[0]
s.session = s.fileparts[1]
s.data = {'ID': s.subject_id,
'site': s.site,
'np_session': s.session,
'np_followup': s.npsession_npfollowup[s.session],
'filepath': s.filepath,
}
s.read_file()
def read_file(s):
''' use program s.func_name to extract results and put in s.data '''
raw_line = subprocess.check_output([s.func_name, s.filepath])
data_dict = s.parse_csvline(raw_line)
data_dict.pop('id', None)
data_dict.pop('sessioncode', None)
s.data.update(data_dict)
def parse_csvline(s, raw_line):
''' given a string which is a comma-delimited list of results (the raw output of s.func_name)
parse into a list and parse its items '''
# [:-1] excludes the \n at line end
lst = raw_line[:-1].decode('utf-8').split(',')
# convert to dict in anticipation of storing as record
d = dict(zip(s.cols, lst))
# convert dict items to appropriate types
for k, v in d.items():
d[k] = s.parse_csvitem(k, d.pop(k)) # pop passes the val to parser
return d
@staticmethod
def parse_csvitem(k, v):
''' given a string item from the results, parse it appropriately '''
        if not v.strip():  # blank or whitespace-only field
return None # these will get safely coerced to NaN by pandas df
else:
v = v.lstrip() # remove leading whitespace
if k in ['dob', 'testdate']:
v = datetime.strptime(v, '%m/%d/%Y') # dates
elif k in ['id', 'gender', 'hand', 'sessioncode']:
pass # leave these as strings
elif '_ao' in k:
v = float(v[:-1]) # percentages converted to proportions
else:
v = float(v) # all other data becomes float
return v
def assure_quality(s):
''' after results have been extracted, perform quality assurance checks on them '''
try:
# check if TOLT or CBST data is missing - if so, set motivation to none
if 'tolt_5b_mim' not in s.data or s.data['tolt_5b_mim'] is None:
s.data['motiv_tolt'] = None
if 'vst_f_tat' not in s.data or s.data['vst_f_tat'] is None:
s.data['motiv_cbst'] = None
# then re-calculate mean
motivs = [motiv for motiv in (s.data['motiv_tolt'], s.data['motiv_cbst']) if motiv]
if motivs:
s.data['motiv_avg'] = np.mean(motivs)
else:
s.data['motiv_avg'] = None
# set CBST fields of 0 to be None
# if any forward field is 0, set all forward to None
# if any backward field is 0, set all backward to None
if s.data['vst_f_span'] == 0:
for field in ['vst_f_tc', 'vst_f_span', 'vst_f_tcat', 'vst_f_tat']:
s.data[field] = None
if s.data['vst_b_span'] == 0:
for field in ['vst_b_tc', 'vst_b_span', 'vst_b_tcat', 'vst_b_tat']:
s.data[field] = None
except KeyError:
print('Missing key in ',s.data['filepath'])
return None
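# Illustrative sketch (added): per-field coercion performed by
# Neuropsych_XML.parse_csvitem. All values below are hypothetical.
def _demo_parse_csvitem():
    dob = Neuropsych_XML.parse_csvitem('dob', ' 01/02/1990')   # -> datetime(1990, 1, 2)
    ao = Neuropsych_XML.parse_csvitem('tolt_3b_ao', ' 75%')    # trailing '%' dropped -> 75.0
    span = Neuropsych_XML.parse_csvitem('vst_f_span', ' 6')    # -> 6.0
    blank = Neuropsych_XML.parse_csvitem('motiv_avg', ' ')     # blank field -> None
    return dob, ao, span, blank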
class Neuropsych_Summary:
def __init__(s, filepath):
s.filepath = filepath
s.path = os.path.dirname(s.filepath)
s.path_parts = filepath.split(os.path.sep)
s.filename = os.path.splitext(s.path_parts[-1])[0]
s.fileparts = s.filename.split('_')
s.site = s.path_parts[-3]
s.subject_id = s.fileparts[0]
s.session = s.fileparts[3][0]
s.motivation = int(s.fileparts[3][1])
s.xmlname = '_'.join([s.subject_id, s.session, 'sub.xml'])
s.xmlpath = os.path.join(s.path, s.xmlname)
s.data = {'ID': s.subject_id,
'site': s.site,
'session': s.session,
'motivation': s.motivation,
}
def read_file(s):
of = open(s.filepath)
lines = [l.strip() for l in of.readlines()]
of.close()
# find section line numbers
section_beginnings = [lines.index(
k) for k in s.section_header_funs_names] + [-1]
ind = -1
for sec, fun_nm in s.section_header_funs_names.items():
ind += 1
sec_cols = lines[section_beginnings[ind] + 1].split('\t')
sec_lines = [L.split('\t') for L in lines[section_beginnings[
ind] + 2:section_beginnings[ind + 1]]]
            s.data[fun_nm[1]] = getattr(s, fun_nm[0])(sec_cols, sec_lines)
class TOLT_Summary_File(Neuropsych_Summary):
integer_columns = ['PegCount', 'MinimumMoves', 'MovesMade', 'ExcessMoves']
float_columns = ['AvgPickupTime', 'AvgTotalTime', 'AvgTrialTime',
'%AboveOptimal', 'TotalTrialsTime', 'AvgTrialsTime']
# boolean_columns = {}
section_header_funs_names = OrderedDict([
('Trial Summary', ('parse_trial_summary', 'trials')),
('Test Summary', ('parse_test_summary', 'tests'))])
def parse_trial_summary(s, trial_cols, trial_lines):
trials = {}
for trial_line in trial_lines:
trialD = {}
for col, val in zip(trial_cols, trial_line):
val = parse_value_with_info(
val, col, s.integer_columns, s.float_columns)
if col == 'TrialNumber':
trial_num = val
else:
trialD[col] = val
trials[trial_num] = trialD
return trials
def parse_test_summary(s, test_cols, test_lines):
# summary data is transposed
for lnum, tl in enumerate(test_lines):
if tl[0][0] == '%':
test_lines[lnum] = [tl[0]] + \
[st[:-1] if '%' in st else st for st in tl[1:]]
# print(type(tl),tl)
# print([ st[:-1] if '%' in st else st for st in tl[1:] ])
# tlinesP.append( tl[0] + [ st[:-1] if '%' in st else st for st in tl[1:] ] )
test_data = {line[0]: [parse_value_with_info(val, line[0], s.integer_columns, s.float_columns)
for val in line[1:]] for line in test_lines}
caseD = {} # case:{} for case in test_cols[1:] }
for cnum, case in enumerate(test_cols[1:]):
caseD[case] = {stat: data[cnum]
for stat, data in test_data.items()}
return caseD
def __init__(s, filepath):
Neuropsych_Summary.__init__(s, filepath)
s.read_file()
class CBST_Summary_File(Neuropsych_Summary):
integer_columns = ['Trials', 'TrialsCorrect']
float_columns = ['TrialTime', 'AverageTime']
boolean_columns = {'Direction': [
'Backward', 'Forward'], 'Correct': ['-', '+']} # False, True
section_header_funs_names = {'Trial Summary': ('parse_trial_summary', 'trials'),
'Test Summary': ('parse_test_summary', 'tests')}
def parse_trial_summary(s, trial_cols, trial_lines):
trials = {}
for trial_line in trial_lines:
trialD = {}
for col, val in zip(trial_cols, trial_line):
val = parse_value_with_info(
val, col, s.integer_columns, s.float_columns, s.boolean_columns)
if col == 'TrialNum':
trial_num = val
else:
trialD[col] = val
trials[trial_num] = trialD
return trials
def parse_test_summary(s, test_cols, test_lines):
tests = {'Forward': {}, 'Backward': {}}
for test_line in test_lines:
testD = {}
for col, val in zip(test_cols, test_line):
if col == 'Direction':
dirD = tests[val]
else:
val = parse_value_with_info(
val, col, s.integer_columns, s.float_columns, s.boolean_columns)
if col == 'Length':
test_len = val
else:
testD[col] = val
dirD[test_len] = testD
return tests
def __init__(s, filepath):
Neuropsych_Summary.__init__(s, filepath)
s.read_file()
def parse_value_with_info(val, column, integer_columns, float_columns, boolean_columns={}):
if column in integer_columns:
val = int(val)
elif column in float_columns:
val = float(val)
elif column in boolean_columns:
val = bool(boolean_columns[column].index(val))
return val
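# Illustrative examples (added): parse_value_with_info coerces a raw string
# according to which column list it falls in. Column names mirror those used
# by the summary-file classes above; values are hypothetical.
def _demo_parse_value_with_info():
    trials = parse_value_with_info('14', 'Trials', ['Trials'], ['TrialTime'])         # -> 14
    t_time = parse_value_with_info('2.75', 'TrialTime', ['Trials'], ['TrialTime'])    # -> 2.75
    correct = parse_value_with_info('+', 'Correct', [], [], {'Correct': ['-', '+']})  # -> True
    return trials, t_time, correct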
| gpl-3.0 |
glennq/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/metrics/pairwise.py | 28 | 46720 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
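# Illustrative sketch (comment only, not part of the library): for
# x = [3, 4] and y = [0, 0] the expansion above gives
# dot(x, x) - 2 * dot(x, y) + dot(y, y) = 25 - 0 + 0, so
#   euclidean_distances([[3, 4]], [[0, 0]])  ->  array([[ 5.]])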
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
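# Illustrative sketch (comment only, hypothetical "centers"):
#   pairwise_distances_argmin_min([[0, 0], [4, 4]], [[1, 1], [5, 5]])
# returns (array([0, 1]), array([ 1.41421356,  1.41421356])), i.e. the index
# of the closest row of Y for each row of X together with that distance.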
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
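# Illustrative sketch (comment only): with sum_over_features=False the result
# is stacked per (row of X, row of Y) pair, e.g.
#   manhattan_distances([[1, 2], [3, 4]], [[1, 2], [0, 3]],
#                       sum_over_features=False)
# returns array([[ 0.,  0.], [ 1.,  1.], [ 2.,  2.], [ 3.,  1.]]) with shape
# (n_samples_X * n_samples_Y, n_features).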
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
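# Illustrative check (comment only) of the note above: for the unit vectors
# X = [[1, 0]] and Y = [[0, 1]] the squared euclidean distance is 2, half of
# which is 1.0, matching the cosine distance of two orthogonal directions:
#   paired_cosine_distances([[1, 0]], [[0, 1]])  ->  array([ 1.])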
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
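# Illustrative sketch (comment only): with gamma=1, coef0=1, degree=2 and
# x = [1, 2], y = [3, 4], the kernel value is (1 * 11 + 1) ** 2 = 144, i.e.
#   polynomial_kernel([[1, 2]], [[3, 4]], degree=2, gamma=1, coef0=1)
#   ->  array([[ 144.]])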
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
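# Illustrative sketch (comment only): for x = [0, 0], y = [1, 1] and
# gamma=0.5, ||x - y||^2 = 2 and exp(-0.5 * 2) = exp(-1), so
#   rbf_kernel([[0, 0]], [[1, 1]], gamma=0.5)  ->  array([[ 0.36787944]])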
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
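# Illustrative sketch (comment only): a string metric dispatches to the
# functions above, while a callable is evaluated pair by pair, e.g. using
# scipy's chebyshev distance:
#   from scipy.spatial.distance import chebyshev
#   pairwise_distances([[0, 0], [3, 1]], metric=chebyshev)
#   ->  array([[ 0.,  3.],
#              [ 3.,  0.]])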
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernels, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
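# Illustrative sketch (comment only): filter_params drops keyword arguments
# that the chosen kernel does not accept, e.g. 'degree' is silently ignored
# for the RBF kernel here (exp(-0.5 * 1) ~= 0.6065):
#   pairwise_kernels([[0, 1]], [[1, 1]], metric='rbf', filter_params=True,
#                    gamma=0.5, degree=3)
#   ->  array([[ 0.60653066]])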
| bsd-3-clause |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
        converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
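# Illustrative sketch (comment only; Foo and FooConverter are hypothetical
# user-defined names): after subclassing ConversionInterface for a custom
# type, a single registration makes matplotlib unit-aware for that type:
#   registry[Foo] = FooConverter()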
| gpl-3.0 |
Eric89GXL/mne-python | examples/stats/plot_sensor_regression.py | 8 | 4049 | """
============================================================================
Analysing continuous features with binning and regression in sensor space
============================================================================
Predict single trial activity from a continuous variable.
A single-trial regression is performed in each sensor and timepoint
individually, resulting in an :class:`mne.Evoked` object which contains the
regression coefficient (beta value) for each combination of sensor and
timepoint. This example shows the regression coefficient; the t and p values
are also calculated automatically.
Here, we repeat a few of the analyses from [1]_. This can be easily performed
by accessing the metadata object, which contains word-level information about
various psycholinguistically relevant features of the words for which we have
EEG activity.
For the general methodology, see e.g. [2]_.
References
----------
.. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand
words are worth a picture: Snapshots of printed-word processing in an
event-related potential megastudy. Psychological Science, 2015
.. [2] Hauk et al. The time course of visual word recognition as revealed by
linear regression analysis of ERP data. Neuroimage, 2006
"""
# Authors: Tal Linzen <[email protected]>
# Denis A. Engemann <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import pandas as pd
import mne
from mne.stats import linear_regression, fdr_correction
from mne.viz import plot_compare_evokeds
from mne.datasets import kiloword
# Load the data
path = kiloword.data_path() + '/kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
print(epochs.metadata.head())
##############################################################################
# Psycholinguistically relevant word characteristics are continuous. I.e.,
# concreteness or imaginability is a graded property. In the metadata,
# we have concreteness ratings on a 5-point scale. We can show the dependence
# of the EEG on concreteness by dividing the data into bins and plotting the
# mean activity per bin, color coded.
name = "Concreteness"
df = epochs.metadata
df[name] = pd.cut(df[name], 11, labels=False) / 10
colors = {str(val): val for val in df[name].unique()}
epochs.metadata = df.assign(Intercept=1) # Add an intercept for later
evokeds = {val: epochs[name + " == " + val].average() for val in colors}
plot_compare_evokeds(evokeds, colors=colors, split_legend=True,
cmap=(name + " Percentile", "viridis"))
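# (Illustrative note, not part of the original example: after the binning
# above, df[name] takes one of the eleven values 0.0, 0.1, ..., 1.0, so each
# colour in the legend corresponds to one concreteness bin.)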
##############################################################################
# We observe that there appears to be a monotonic dependence of EEG on
# concreteness. We can also conduct a continuous analysis: single-trial level
# regression with concreteness as a continuous (although here, binned)
# feature. We can plot the resulting regression coefficient just like an
# Event-related Potential.
names = ["Intercept", name]
res = linear_regression(epochs, epochs.metadata[names], names=names)
for cond in names:
res[cond].beta.plot_joint(title=cond, ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
##############################################################################
# Because the :func:`~mne.stats.linear_regression` function also estimates
# p values, we can --
# after applying FDR correction for multiple comparisons -- also visualise the
# statistical significance of the regression of word concreteness.
# The :func:`mne.viz.plot_evoked_image` function takes a ``mask`` parameter.
# If we supply it with a boolean mask of the positions where we can reject
# the null hypothesis, points that are not significant will be shown
# transparently, and if desired, in a different colour palette and surrounded
# by dark contour lines.
reject_H0, fdr_pvals = fdr_correction(res["Concreteness"].p_val.data)
evoked = res["Concreteness"].beta
evoked.plot_image(mask=reject_H0, time_unit='s')
| bsd-3-clause |
IgorGlushkov/Peatfires_tools | firms_to_fusion_table_v2_both.py | 1 | 13442 | # -*- coding: UTF-8 -*-
__author__ = "IG,authentification block to Google FT by JOE STORY "
__copyright__ = ""
__license__ = ""
__modified__ = "## IG"
import json
import sys
import requests
import httplib2
import csv
#google-api-client
from apiclient.discovery import build
#oauth2client=1.5.2 also need PyOpenSSL installed
from oauth2client.client import SignedJwtAssertionCredentials
import pandas as pd
import time,os
from apiclient.http import MediaFileUpload
import glob
import psycopg2
#'''set by user'''
#Script for downloading FIRMS fire data for Indonesia peatlands, overlaying it with peatland polygons, and uploading the results to a Fusion Table (FT) and Google Drive; the FT has to be open for access and editing by the e-mail address from the .json key file
#see http://tech.thejoestory.com/2015/05/gspread-python-module-and-more-push-ups.html and http://tech.thejoestory.com/2015/12/fusion-table-api-insert-data-python.html
#set sources/names and outputs. TODO: replace with a proper argument parser
#FT id code
tableId = "194jvU2DZMBX5tYg6VTCrrO_zwH8Vfo0KYG9jCumk"
#drive folder id code
folder_id = '0B2diDVTPYguodkhqOE9pRXllVlE'
#fire data interval
FIRE_LASTS ='24h'
#url to MODIS data
URL_MOD_FIRE_SHAPES = 'https://firms.modaps.eosdis.nasa.gov/active_fire/c6/text/MODIS_C6_Russia_and_Asia_%s.csv' % FIRE_LASTS
#url to VIIRS data
URL_VII_FIRE_SHAPES = 'https://firms.modaps.eosdis.nasa.gov/active_fire/viirs/text/VNP14IMGTDL_NRT_Russia_and_Asia_%s.csv' % FIRE_LASTS
#dirs for temporal and result files
source_dir='d:/Thematic/Peatfires/Russia_Peatlands/Firms_source'
source_sel='d:/Thematic/Peatfires/Russia_Peatlands/Firms_source/Temp'
result_dir='d:/Thematic/Peatfires/Russia_Peatlands/Firms_source/Result'
upload_dir='d:/Thematic/Peatfires/Russia_Peatlands/Firms_source/ToUpload'
#filenames for polygons (peatlands from GFW Indonesia_Peat_Lands.shp)
filename_peatlands = 'mask2'
#''set by user''
#set working path2filenames
def set_outputs(filename):
sourcepath = os.path.join(source_dir, '%s.csv'%(filename))
outpath_selcsv = os.path.join(source_sel, '%s.csv'%(filename))
outpath_selshp = os.path.join(source_sel, '%s.shp'%(filename))
outpath_selvrt = os.path.join(source_dir, '%s.vrt'%(filename))
outpath_tmpshp = os.path.join(source_dir, '%s_tmp.shp'%(filename))
outpath_rescsv = os.path.join(result_dir, '%s.csv'%(filename))
outpath_resshp = os.path.join(result_dir, '%s_fin.shp'%(filename))
outpath_reskml = os.path.join(result_dir, '%s.kml'%(filename))
outpath_resvrt = os.path.join(result_dir, '%s.vrt'%(filename))
outpath_upload = os.path.join(upload_dir, '%s.csv'%(filename))
return sourcepath,outpath_selcsv,outpath_selshp,outpath_selvrt,outpath_tmpshp,outpath_rescsv,outpath_resshp,outpath_reskml,outpath_resvrt,outpath_upload
#remove files
def silent_remove(filename):
if os.path.exists(filename):
os.remove(filename)
#authentication to FT (needs the .json key saved on disk)
def auth2FT():
json_key = json.load(open('d:\\Thematic\\Peatfires\\Python\\import_export_csv2ft\\iggkey.json'))
scope = ['https://www.googleapis.com/auth/fusiontables']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
http = httplib2.Http()
http = credentials.authorize(http)
#TODOcheck what is build
service = build("fusiontables", "v1", http=http)
return(service)
#authentication to Drive
def auth2drive():
json_key = json.load(open('d:\\Thematic\\Peatfires\\Python\\import_export_csv2ft\\iggkey.json'))
scope = ['https://www.googleapis.com/auth/drive']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
http = httplib2.Http()
http = credentials.authorize(http)
#TODOcheck what is build
service = build("drive", "v3", http=http)
return(service)
#def get session
def get_session(url):
url = url
s = requests.session()
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Referer': url,
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'}
r = s.get(url, headers=headers)
return(r)
#read file from site and save to csv
def read_csv_from_site(url):
r = get_session(url)
reader = csv.reader(r.iter_lines(), delimiter=',', quotechar='"')
outfile=open(sourcepath, 'wb')
tmp = csv.writer(outfile)
tmp.writerows(reader)
#intersect with polygons layer using ogr2ogr
def sp_join(filename):
try:
#convert to shp
#create vrt and convert to shp
f = open(outpath_selvrt, 'w')
f.write("<OGRVRTDataSource>\n")
f.write(" <OGRVRTLayer name=\"%s_tmp\">\n" % (filename))
f.write(" <SrcDataSource relativeToVRT=\"1\">%s</SrcDataSource>\n" % (source_dir))
f.write(" <SrcLayer>%s</SrcLayer>\n" % (filename))
f.write(" <GeometryType>wkbPoint</GeometryType>\n")
f.write(" <LayerSRS>WGS84</LayerSRS>\n")
f.write(" <GeometryField encoding=\"PointFromColumns\" x=\"longitude\" y=\"latitude\"/>\n")
f.write(" </OGRVRTLayer>\n")
f.write("</OGRVRTDataSource>\n")
f.close()
#convert
command="ogr2ogr -overwrite -skipfailures -f \"ESRI Shapefile\" %s %s && ogr2ogr -overwrite -f \"ESRI Shapefile\" %s %s" % (source_dir,sourcepath,source_dir,outpath_selvrt)
print(command)
os.system(command)
#intersect
#command = "ogr2ogr -overwrite -lco encoding=UTF-8 -sql \"SELECT ST_Intersection(A.geometry, B.geometry) AS geometry, A.*, B.* FROM %s_tmp A, %s B WHERE ST_Intersects(A.geometry, B.geometry)\" -dialect SQLITE %s %s -nln %s_tmp1" % (filename,filename_peatlands,source_dir,source_dir,filename)
#print(command)
#os.system(command)
        #convert back to csv
#command = "ogr2ogr -overwrite -skipfailures -f CSV %s %s" % (outpath_selcsv,os.path.join(source_dir, '%s_tmp1.shp'%(filename)))
#print(command)
#os.system(command)
except:
        print('An error occurred.')
#spatial join using PSQL
def sp_join_postgres(filename):
#connecting to database
conn_string = "host='localhost' dbname='gisbase' user='postgres' password='terra'"
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
try:
#cursor.execute('DROP TABLE %s_tmp CASCADE' %(filename))
command = "shp2pgsql -a -s 4326 -I -c -W UTF-8 %s > %s" % (os.path.join(source_dir, '%s_tmp.shp'%(filename)),os.path.join(source_dir, '%s.sql'%(filename)))
print(command)
os.system(command)
cursor.execute(open('%s'% os.path.join(source_dir, '%s.sql'%(filename)), "r").read())
except:
print('err')
try:
cursor.execute('ALTER TABLE %s_tmp DROP COLUMN gid'%(filename))
cursor.execute('INSERT INTO %s_russia SELECT * FROM %s_tmp'%(filename.split('_')[0],filename))
conn.commit()
except:
print('err')
#cursor.execute('DROP TABLE %s_jn'%(filename))
try:
cursor.execute('CREATE TABLE %s_jn AS SELECT %s_tmp.*, mask2.gid as mgid, mask2.unique_id, mask2.type AS type,mask2.district,mask2.region FROM mask2, %s_tmp WHERE ST_Intersects(mask2.geom, %s_tmp.geom)'%(filename,filename,filename,filename))
except:
print('err')
cursor.execute('SELECT * FROM %s_jn'%(filename))
res=cursor.fetchall()
#print(res)
res=pd.DataFrame(res)
res.to_csv(outpath_selcsv, sep=',', index=False,header=False)
cursor.execute('DROP TABLE %s_tmp,%s_jn CASCADE' %(filename,filename))
conn.commit()
cursor.close()
conn.close()
#find last rowId from FT
def find_last_id_in_FT():
reqrowid = "SELECT ROWID FROM " + tableId
request = service.query().sql(sql=reqrowid)
rsp = request.execute()
lastid = int(len(rsp["rows"]))
return(lastid)
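# Note (added for clarity): the row count is used as the offset for new GlobID
# values; this assumes rows are never deleted from the Fusion Table, otherwise
# IDs could collide.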
#compose new fields and create final csv ready to upload with the same columns as in FT
def create_csv_to_upload(filename):
if os.path.isfile(outpath_selcsv):
csvday = pd.read_csv(outpath_selcsv,header=None, names=('latitude','longitude','bright_ti4','scan','track','acq_date','acq_time','satellite','confidence','version','bright_ti5','frp','daynight','geom','id','unique_id','type','region','district'))
tmppath = os.path.join(result_dir, 'tmp.csv')
tmpcsv = []
lastid = find_last_id_in_FT()
for row in csvday.iterrows():
id = row[0]+lastid
lat = row[1]['latitude']
lon = row[1]['longitude']
acq_date = row[1]['acq_date']
acq_time = row[1]['acq_time']
peat = row[1]['unique_id']
region = ''
obl = row[1]['region']
raion = row[1]['district']
conf = row[1]['confidence']
type1 = row[1]['type']
type = filename.split('_')[0]
note = ''
whouploaded = 'GProbot'
who = ' '
link = "https://drive.google.com/drive/u/0/folders/0B2diDVTPYguodkhqOE9pRXllVlE"
status_no = 1
line = str(id) + "," + str(lat) + "," + str(lon) + "," + acq_date + "," + str(acq_time) + "," + str(peat) + "," + str(type1) + "," + region + "," + obl + "," + raion + "," + str(conf) + "," + type + "," + whouploaded + "," + who + "," + str(status_no) + "," + note + "," + link
tmpcsv.append(line)
else:
print("File doesn't exist")
try:
tmpf = open(tmppath,'wb')
for line in tmpcsv:
tmpf.write(line + '\n')
tmpf.close()
df = pd.read_csv(tmppath,header=None, names=('GlobID', 'lat', 'lon','acq_date','acq_time','peat','type1','reg','obl','rai','confidence', 'type', 'whouploaded', 'who','status_no', 'note','link'))
df.to_csv(outpath_upload, sep=',', encoding='utf-8', index=False,header=False)
df.to_csv(outpath_rescsv, sep=',', encoding='utf-8', index=False,header=True)
silent_remove(tmppath)
except:
print('Err')
#convert to kml
def convert2kml(filename):
try:
#create vrt and convert to shp
f = open(outpath_resvrt, 'w')
f.write("<OGRVRTDataSource>\n")
f.write(" <OGRVRTLayer name=\"%s_fin\">\n" % (filename))
f.write(" <SrcDataSource relativeToVRT=\"1\">%s</SrcDataSource>\n" % (result_dir))
f.write(" <SrcLayer>%s</SrcLayer>\n" % (filename))
f.write(" <GeometryType>wkbPoint</GeometryType>\n")
f.write(" <LayerSRS>WGS84</LayerSRS>\n")
f.write(" <GeometryField encoding=\"PointFromColumns\" x=\"lon\" y=\"lat\"/>\n")
f.write(" </OGRVRTLayer>\n")
f.write("</OGRVRTDataSource>\n")
f.close()
command = "ogr2ogr -overwrite -skipfailures -f \"ESRI Shapefile\" %s %s && ogr2ogr -overwrite -f \"ESRI Shapefile\" %s %s" % (result_dir,outpath_rescsv,result_dir,outpath_resvrt)
os.system(command)
#convert to kml
command = "ogr2ogr -overwrite -skipfailures -f KML %s %s" % (outpath_reskml,outpath_resshp)
os.system(command)
except:
print('An error occurred during the conversion of %s' % (outpath_rescsv))
#upload kml to drive
def upload_to_drive(outpath_reskml):
try:
if os.path.isfile(outpath_reskml):
print(outpath_reskml)
file_metadata = {
'name' : '%s'%(os.path.basename(outpath_reskml)),
'parents': [ folder_id ]}
media=MediaFileUpload(outpath_reskml,mimetype='text/csv', resumable=True)
request = servicedrive.files().create(body=file_metadata, media_body=media, fields='id')
nms =request.execute()
else:
print('File does not exist')
except:
#time.sleep(60)
print('An error occurred while uploading file %s' % (outpath_reskml))
#upload csv to FT
def upload_to_FT(outpath_upload):
try:
if os.path.isfile(outpath_upload):
print(os.path.basename(outpath_upload))
media=MediaFileUpload(outpath_upload,mimetype='application/octet-stream', resumable=True)
request = service.table().importRows(tableId=tableId,media_body=media)
nms =request.execute()
else:
print('File does not exist')
except:
#time.sleep(60)
print('An error occurred while uploading file %s' % (os.path.basename(outpath_upload)))
if __name__ == "__main__":
while True:
log=os.path.join(source_dir, 'log.txt')
logf = open(log, 'a')
#current date (TODO: use a standard date format)
start=time.time()
currtime = time.localtime()
date=time.strftime('%d%m%Y',currtime)
cdate=time.strftime('%d-%m-%Y %H:%M:%S',currtime)
logf.write(cdate)
#set modis filename
filename_modis = 'modis_%s' % (date)
#set viirs filename
filename_viirs = 'viirs_%s' % (date)
print('Process started at %s' % (cdate))
#create dictionary
sat_url={filename_modis:URL_MOD_FIRE_SHAPES,filename_viirs:URL_VII_FIRE_SHAPES}
#build services for uploading
service = auth2FT()
servicedrive = auth2drive()
#start workflow for modis and viirs
for filename,url in sat_url.iteritems():
sourcepath,outpath_selcsv,outpath_selshp,outpath_selvrt,outpath_tmpshp,outpath_rescsv,outpath_resshp,outpath_reskml,outpath_resvrt,outpath_upload=set_outputs(filename)
read_csv_from_site(url)
sp_join(filename)
sp_join_postgres(filename)
#create csv
create_csv_to_upload(filename)
convert2kml(filename)
upload_to_drive(outpath_reskml)
upload_to_FT(outpath_upload)
#for tmpfile in glob.glob(os.path.join(source_dir, '*_tmp*')):
#os.remove(tmpfile)
#if os.path.isfile(outpath_selcsv):
#os.remove(outpath_selcsv)
#else:
#continue
#sleep for 24h+1sec
end=time.time()
sleep=86401-(end-start)
logf.close()
time.sleep(sleep)
| gpl-3.0 |
google/qkeras | experimental/lo/optimizer.py | 1 | 9825 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements random forest or logic otimizer function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing as mp
import os
import pickle
import random
import shutil
import subprocess
import sys
import time
import warnings
import numpy as np
import six
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from .compress import Compressor
from .generate_rf_code import gen_random_forest
from .table import load
def file_compress(fin, fout):
"""Compresses table using hash set."""
c = Compressor()
n_lines = 0
for line in open(fin):
n_lines += 1
line = line.strip()
c.add_entry(line)
f = open(fout, "w")
n_compressed = 0
for line in c():
n_compressed += 1
f.write(line + "\n")
f.close()
print("... random forrest for {} reduced from {} to {} entries".format(
os.path.basename(fin), n_lines, n_compressed))
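# Illustrative usage (file names are hypothetical, not part of the pipeline): deduplicate a raw
# truth-table dump before the random-forest step, e.g.
#   file_compress("conv2d_0_m_0.raw.csv", "conv2d_0_m_0.csv")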
def mp_rf_optimizer_func(fn_tuple):
"""Executes in parallel creation of random forrest creation."""
fn, flags, file_suffix = fn_tuple
n_trees = flags["n_trees"]
is_regressor = flags["is_regressor"]
sample_size = flags["sample_size"]
n_features = flags["n_features"]
max_depth = flags["max_depth"]
if not file_suffix:
file_suffix = "none"
path_split = fn.split("/")
path = "/".join(path_split[:-1]) + "/"
fn_split = path_split[-1].split(".")
# o_file = path + ".".join(fn_split[0:-2] + [fn_split[-1]])
cv_file = path + ".".join(fn_split[0:-2] + [file_suffix])
rfb_file = path + ".".join(fn_split[0:-2] + ["rb", "bin"])
# let's compress the table first to make the job easier for random forest.
# compression can usually achieve a ratio of 50x or more.
# compress(fn, o_file)
train = load(fn)
n_features = "auto" if not n_features else float(n_features)
# min_size = 1
if max_depth:
max_depth = int(max_depth)
print("... creating random forrest for " + os.path.basename(fn) + " with " +
str(sample_size) + " samples")
if is_regressor:
rf = RandomForestRegressor(
n_estimators=n_trees,
max_depth=max_depth,
# min_samples_split=2,
# min_samples_leaf=min_size,
max_features=n_features,
# max_leaf_nodes=100,
# oob_score=True,
# warm_start=True,
bootstrap=True,
random_state=42,
n_jobs=1)
else:
rf = RandomForestClassifier(
n_estimators=n_trees,
max_depth=max_depth,
# min_samples_split=2,
# min_samples_leaf=min_size,
max_features=n_features,
# max_leaf_nodes=100,
# oob_score=True,
# warm_start=True,
bootstrap=True,
random_state=42,
n_jobs=1)
if sample_size and train.shape[0] >= 10000:
sample_size = int(sample_size)
np.random.seed(42)
idx = np.random.choice(train.shape[0], train.shape[0], replace=False)
x = train[idx[sample_size:], 0:-1]
y = train[idx[sample_size:], -1]
x_test = train[idx[0:sample_size], 0:-1]
y_test = train[idx[0:sample_size], -1]
else:
x = train[:, 0:-1]
y = train[:, -1]
x_test = x
y_test = y
estimators = []
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rf.fit(x, y)
func_name = fn_split[0]
bits = np.ceil(
np.log2(
np.abs(
np.amax(x, axis=0) -
np.amin(x, axis=0) + 1))).astype(np.int32)
is_neg = (np.amin(x, axis=0) < 0).astype(np.int8)
o_bits = np.ceil(
np.log2(
np.abs(
np.amax(y, axis=0) -
np.amin(y, axis=0) + 1))).astype(np.int32)
o_is_neg = (np.amin(y, axis=0) < 0).astype(np.int8)
rf.bits = bits
rf.is_neg = is_neg
rf.o_bits = o_bits
rf.o_is_neg = o_is_neg
code = gen_random_forest(
rf, func_name, bits, is_neg, o_bits, o_is_neg,
is_regressor=is_regressor, is_top_level=False,
is_cc=file_suffix == "cc")
open(cv_file, "w").write("\n".join(code))
p = 1.0 * np.round(rf.predict(x_test))
dy = np.max(train[:, -1]) - np.min(train[:, -1])
error = np.sum(np.abs(y_test - p)) / (1.0 * p.shape[0] * dy)
score = np.sum(y_test == p) / p.shape[0]
print("y:", np.max(y_test), y_test[0:30].astype(np.int32))
print("p:", np.max(p), p[0:30].astype(np.int32))
print("... model {} with score of {:.2f}% and error of {:.2f}%".format(
func_name, 100.0*score, 100.0*error))
print("... saving model in {}".format(rfb_file))
pickle.dump(rf, open(rfb_file, "wb"))
return rfb_file
def mp_abc_optimizer_func(fn):
"""Performs espresso and abc optimization on a single espresso input."""
fn_split = fn.split(".")
o_file = ".".join(fn_split[0:-2] + [fn_split[-1]])
v_file = ".".join(fn_split[0:-2] + ["v"])
b_file = ".".join(fn_split[0:-2] + ["blif"])
print("...running espresso in " + fn)
espresso_flags = os.environ.get("ESPRESSO_FLAGS", "-Dexpand")
cmd = "espresso {} {} > {}".format(fn, espresso_flags, o_file)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
output = output.strip()
if output:
print(output)
sys.stdout.flush()
# check if network is empty
for line in open(o_file):
line = line.strip()
if line[0:2] == ".p":
terms = int(line[2:])
# empty : espresso optimized away all the logic
if terms == 0:
shutil.copyfile(fn, o_file)
break
print("...running abc in " + o_file)
abc_flags = os.environ.get("ABC_FLAGS", "")
abc_flags_list = abc_flags.split(";") if abc_flags else []
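  # ABC flow (descriptive note): read the espresso-minimized PLA, structurally hash it into an
  # AIG (strash), run the dc2 optimization script, map to 3-input LUTs (if -K 3) and dump the
  # result as Verilog and BLIF netlists.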
abc_cmds_list = (
["read_pla " + o_file] + abc_flags_list +
["strash",
"dc2",
"strash",
"if -K 3",
"write_verilog " + v_file,
"write_blif " + b_file
])
abc_cmds = ";".join(abc_cmds_list)
cmd = "abc -c '" + abc_cmds + "'"
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
output = output.strip()
if output:
print(output)
sys.stdout.flush()
print("...generated " + v_file)
def run_abc_optimizer(files):
"""Implements logic optimizer using espresso/abc."""
# intel processors sometimes return number of threads, not processors
cpus = mp.cpu_count() // 2
start_time = time.time()
pool = mp.Pool(cpus)
pool.map(mp_abc_optimizer_func, files)
pool.close()
print("Optimizer ran in {} seconds.".format(time.time() - start_time))
def run_rf_optimizer(files, flags, file_suffix="cc"):
"""Implements random forest main optimizer."""
# intel processors sometimes return number of threads, not processors
cpus = mp.cpu_count() // 2
start_time = time.time()
pool = mp.Pool(cpus)
pool.map(mp_rf_optimizer_func, zip(
files, [flags]*len(files), [file_suffix]*len(files)))
pool.close()
print("Optimizer ran in {} seconds.".format(time.time() - start_time))
# generates header file
# .../.../.../conv2d_0_m.csv/conv2d_0_m_0.csv
#
# returns conv2d_0_m for module_name
module_name = files[0].split("/")[-2].split(".")[0]
path_split = files[0].split("/")
path = "/".join(path_split[:-1]) + "/"
fn_split = path_split[-1].split(".")
rfb_file = path + ".".join(fn_split[0:-2] + ["rb", "bin"])
rf = pickle.load(open(rfb_file, "rb"))
f = open(path + module_name + "." + file_suffix, "w")
if file_suffix == "cc":
f.write("#include <ac_int.h>\n\n")
modules = []
for fn in files:
path_split = fn.split("/")
path = "/".join(path_split[:-1]) + "/"
fn_split = path_split[-1].split(".")
v_file = ".".join(fn_split[0:-2] + [file_suffix])
func_name = fn_split[0]
if file_suffix == "v":
f.write("'include \"" + v_file + "\"\n")
else:
f.write("#include \"" + v_file + "\"\n")
modules.append(func_name)
f.write("\n\n")
if file_suffix == "v":
f.write("module " + module_name + "(")
f.write("input [" + str(np.sum(rf.bits)-1) + ":0] in, ")
o_sign = " signed " if rf.o_is_neg else ""
f.write("output " + o_sign + "[" + str(len(modules)*rf.o_bits-1) +
":0] out);\n")
else:
f.write("void " + module_name + "(")
f.write("ac_int<" + str(np.sum(rf.bits)) + ",false> in, ")
f.write("ac_int<" + str(len(modules)*rf.o_bits) + "," +
("true" if rf.o_is_neg else "false") +
"> &out)\n")
f.write("{\n")
for o in range(len(modules)):
if file_suffix == "v":
f.write(" wire " + ("signed " if rf.o_is_neg else "") +
"[" + str(rf.bits[-1]-1) + ":0] "
"o_" + str(o) + ";\n")
f.write(" " + modules[o] + "(in, o_" + str(o) + ");\n")
f.write(" assign out[" + str(rf.o_bits*(o+1)-1) + ":" +
str(rf.bits[-1]*o) + "] = o_" + str(o) + ";\n")
else:
f.write(" ac_int<" + str(rf.o_bits) + "," +
("true" if rf.o_is_neg else "false") +
"> o_" + str(o) + "; " + modules[o] +
"(in, o_" + str(o) + "); out.set_slc<" +
str(rf.o_bits) + ">(" +
str(rf.o_bits*o) + "," +
"o_" + str(o) + ");\n")
if file_suffix == "cc":
f.write("}")
f.close()
| apache-2.0 |
eggimasv/EcoDen | Python_Files/functions_EcoDen.py | 1 | 101952 | # ======================================================================================
# Copyright 2016 Swiss Federal Institute of Aquatic Science and Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Literature
# ==========
# Eggimann, S., Truffer, B., Maurer, M., 2016. Economies of densities for waste water treatment. Water Research, 101, 476-489.
#
# Contact: [email protected]
# Version 1.0
# Date: 3.6.2016
# Author: Eggimann Sven
# ======================================================================================
import math, random, numpy, copy # Import Numpy for standard deviation
import numpy as np
print("Sucseeful loading functions_transport")
def testVisualizeCatchement(scheduledRoute, InitialDepot, sinks):
#----------------------
##### TEST
#----------------------
testDistanceOneRound = 0
distanceDepotCenter = 0
totalTime = 0
cnt = 0
for i in scheduledRoute:
if cnt == 1:
distn = distanceCalc2dKmFactor(old, (i[1], i[2]), 1) # Distance from previous OST to current OST
totalTime += distn / float(vtruck)
testDistanceOneRound += distn
old = (i[1], i[2])
cnt = 1
# calc average distance to depot center
for i in scheduledRoute:
d = distanceCalc2dKmFactor(InitialDepot, (i[1], i[2]), 1) # Distance from depot to OST
distanceDepotCenter += d
print("-----")
print("TEST CALCULATIONS Total Distance one round: " + str(testDistanceOneRound))
print("Dist per OST: " + str(testDistanceOneRound/len(scheduledRoute)))
print("TOTTIME: " + str(totalTime))
print("Anzahl OST: " + str(len(scheduledRoute)))
print("Aerage distance ode pot: " + str(distanceDepotCenter/len(scheduledRoute)))
import numpy as np
import matplotlib.pyplot as plt
xD = []; yD=[]
for point in scheduledRoute:
xD.append(point[1])
yD.append(point[2])
plt.scatter(xD,yD)
print("InitialDepot:" + str(InitialDepot))
depotX, depotY = InitialDepot[0], InitialDepot[1]
plt.scatter(depotX,depotY, s=80, color='red') # Depot
xSinks, ySinks = [],[]
for i in sinks:
xSinks.append(i[0])
ySinks.append(i[1])
plt.scatter(xSinks,ySinks, s=20, color='green') # sinks
plt.show()
def roundTo10Steps(x, base=5):
''' Rounding Function to 10 steps
Input:
x - Number
base - Rounding to 10 steps
Output:
rounded number
'''
return int(base * round(float(x) / base))
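# Example: roundTo10Steps(27, base=10) returns 30 and roundTo10Steps(23, base=10) returns 20;
# with the default base=5, roundTo10Steps(27) returns 25.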
def getDistanceFactor(r, percentSA):
'''
depending on circle radius get distance factor
Input:
r - Circle Radius
percentSA - Percentage for Sensitivity Analysis
Output:
fdScheduled - Distance Factor scheduled
fdUnscheduled - Distance Factor unscheduled
'''
fdScheduled = (0.0004 * r + 1.8536) * percentSA # Derived distance weighting from case study
fdUnscheduled = (-0.0005 * r + 1.2775) * percentSA # Derived distance weighting from case study
return fdScheduled, fdUnscheduled
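# Illustrative values (assuming percentSA = 1.0, i.e. no sensitivity scaling):
# r = 5 gives fdScheduled ~ 1.86 and fdUnscheduled ~ 1.28.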
def distanceCalc2dKmFactor(p0, p1, df):
''' Calculate 2d Distance'''
distance = math.hypot(p0[0] - p1[0], p0[1] - p1[1])
km = (float(distance) / 1000) * df
return km
def fromOldNodeToNewNode(currentOST, newOST, df, totTravelDistance, totTravelTime, hPersonellOperation, travelSpeed, notAlreadyAtnewNode):
if notAlreadyAtnewNode == False:
dist = distanceCalc2dKmFactor(currentOST, newOST, df) # Distance from former position to new position
travelTime = dist / float(travelSpeed) # Calculate Travel Time
totTravelDistance += dist # Add Travel distance to total distance
totTravelTime += travelTime
hPersonellOperation += travelTime # Add Travel Time to current travel time
return totTravelDistance, totTravelTime, hPersonellOperation
def transformNetworkToDepot(streetVertices, depotCircle, streetNetworkCenter):
'''
transform street network in order that center is the circle center
'''
streetVerticesNew = []
XShift = depotCircle[0] - streetNetworkCenter[0]
YShift = depotCircle[1] - streetNetworkCenter[1]
# Correct Vertices
for i in streetVertices:
newX = i[1] + XShift
newY = i[2] + YShift
z = [i[0], newX, newY, 0]
streetVerticesNew.append(z)
return streetVerticesNew
def checkIfAtHome(pemp):
''' check if anybody is at home'''
randomDistr = random.uniform(0, 1)
if randomDistr < pemp:
atHome = True
else:
atHome = False
return atHome
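# Example: with pemp = 0.6 roughly 60% of the visits find somebody at home (emptying possible);
# the random draw is independent for every call.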
def emptyWWTPsInList(WWTPList, WWTP):
'''
This function empties WWTPs in a list
WWTPstoEmpty - List with WWTPs
WWTP - WWTP to empty
'''
import copy
WWTPEmpty = copy.copy(WWTP)
# Empty WWTPs
for i in WWTPList:
for e in WWTPEmpty:
if e[0] == i[0]:
e[8] = 0
break
return WWTP
def controlA(WWTPstoEmpty):
''' control function'''
#print(WWTPstoEmpty)
for f in WWTPstoEmpty:
if f[8] != 0:
print("ERROR: NOT WWTP EMPTIED")
prnt(".")
def algorithmEvacuation(punkte, initialDepot, tpmax, sinks, travelSpeed, temptyTruck, df, cltruck, temptyOST, pemp):
''' This function calculates the costs of emptying WWTPs, checking at every step whether the truck capacity is reached and, if so, travelling to a sink.
Input
punkte - WWWTs in correct tour order
initialDepot - Initial Depot
tpmax - Max Working hours
sinks - Deposits
travelSpeed - Vehicle Travel Speed
temptyTruck - Time needed to empty truck
df - Distance Factor
cltruck - Load Truck
temptyOST - Time for emptying OST
pemp - Emptying probability
Output:
totTravelDistance - Total travelled distance
totTravelTime - Total travelled time
totWorkingTime - Total working time
'''
positionInWWTP, totTravelDistance, hPersonnel, totTravelTime, loadedOnTrucksOperation, totWorkingTime = 0, 0, 0, 0, 0, 0 # Parameters
statistics_NrOfDEPOVISIT, statistics_Ausnahme = 0, 0
visitedWWTP = [] # Visitied WWTPs
currentOST = initialDepot # Initial copy
emptyBecasueFarAwayOST = False
while len(visitedWWTP) < len(punkte): # As long not all WWTP have been visited
#print("start " + str(emptyBecasueFarAwayOST))
for i in punkte:
if i[0] not in visitedWWTP:
#print("NEW OST---------- " + str(i))
#print("old OST--------- " + str(currentOST))
#print("hPersonnel: " + str(hPersonnel))
#print("lenVISIT: " + str(len(visitedWWTP)))
newWWTPCoordinates = (i[1], i[2]) # Coordinates of WWTP to serve next
notAlreadyAtnewNode = False
# Check if max work a day is reached. If yes, drive to depot and empty and return to new node the next day
farAwayOST, hPersonnel, totTravelTime, totTravelDistance, loadedOnTrucksOperation, totWorkingTime, returnedToDepot, statistics_NrOfDEPOVISIT, statistics_Ausnahme = checkIfDayReached(statistics_Ausnahme, statistics_NrOfDEPOVISIT, emptyBecasueFarAwayOST, currentOST, newWWTPCoordinates, initialDepot, tpmax, loadedOnTrucksOperation, sinks, hPersonnel, totTravelTime, totTravelDistance, travelSpeed, totWorkingTime, temptyTruck, df, temptyOST)
if returnedToDepot == True: #
#print("Return to depot..." + str(initialDepot))
currentOST = initialDepot # Start iterating WWTPs again to test if any WWTP was missed because nobody was at home
break # Start iterating path again
# Check if OST emptying os possible. If no, truck still drives there
atHome = checkIfAtHome(pemp) # Check if anyone is at home
if atHome == False: # Evacuation not possible
#print("not at home")
totTravelDistance, totTravelTime, hPersonnel = fromOldNodeToNewNode(currentOST, newWWTPCoordinates, df, totTravelDistance, totTravelTime, hPersonnel, travelSpeed, notAlreadyAtnewNode)
currentOST = (newWWTPCoordinates) # Truck is now at the OST that could not be emptied
continue
#print(len(visitedWWTP))
#print("hPersonel: " + str(hPersonnel))
#print("totTravelTime: " + str(totTravelTime))
#print("totTravelDistance: " + str(totTravelDistance))
#print("totWorkingTime: " + str(totWorkingTime))
#print("loadedOnTrucksOperation: " + str(loadedOnTrucksOperation))
#print("returnedToDepot: " + str(returnedToDepot))
# Until current WWsTP is fully emptied
totTravelDistance, totTravelTime, hPersonnel = fromOldNodeToNewNode(currentOST, newWWTPCoordinates, df, totTravelDistance, totTravelTime, hPersonnel, travelSpeed, notAlreadyAtnewNode)
capacityReached, stillEmptyCapacity = checkIfthereIsSpaceForAnotherWWTP(loadedOnTrucksOperation, cltruck, i) # Check if truck capacity is reached
punkte, loadedOnTrucksOperation, hPersonnel, totWorkingTime = emptyWWTP(punkte, newWWTPCoordinates, stillEmptyCapacity, loadedOnTrucksOperation, temptyOST, totWorkingTime, hPersonnel) # Empty as much as possible in next WWTP.
#print(".." + str(totTravelDistance))
#print(".." + str(stillEmptyCapacity))
#print(".." + str(hPersonnel))
if capacityReached == 1 or farAwayOST == True: # Either WWTP not empty or farAwayOST
#print("Truck full or farAwayOST: " + str(farAwayOST))
SinkCoordainte = getClosestSink(newWWTPCoordinates, sinks, df) # Search closest sink
distanceToSink = distanceCalc2dKmFactor(newWWTPCoordinates, SinkCoordainte, df) # distance to Sink
travelTime = distanceToSink / float(travelSpeed) # Travel time to Sink
totTravelTime += travelTime # Summen time
hPersonnel += travelTime # Summen current daily time
totTravelDistance += distanceToSink # summen total travelled distance
loadedOnTrucksOperation, totWorkingTime = emptyAtSink(loadedOnTrucksOperation, totWorkingTime, temptyTruck) # Empty Sink
currentIsEmpty = checkIfCurrentWWTPIsEmpty(punkte, i) # If the current WWTP is empty, drive to the next one; otherwise keep emptying the same WWTP
#print("currentIsEmpty: " + str(currentIsEmpty))
if farAwayOST == False: # If regular OST
currentOST = (SinkCoordainte) # Current position
if currentIsEmpty == True: # If regular OST is empty
#print("A")
_ = 0
else:
#print("B")
# Start iterating again (the half-empty OST is then found first)
break # start iteration again
else:
if currentIsEmpty == True: # WWTP is empty, return immediately to Depot because it is a FarAway OST
#print("C")
# return immediately to Depot
distanceToDepotOrig = distanceCalc2dKmFactor(SinkCoordainte, initialDepot, df) # distance to Sink
travelTimeToDepotOrig = distanceToDepotOrig / float(travelSpeed) # Travel time to Sink
totTravelTime += travelTimeToDepotOrig # Summen time
hPersonnel += travelTimeToDepotOrig # Summen current daily time
totTravelDistance += distanceToDepotOrig # summen total travelled distance
currentOST = (initialDepot) # Current position
#print("totTravelTime: " + str(totTravelTime))
#print("hPersonnel: " + str(hPersonnel))
#print("totTravelDistance: " + str(totTravelDistance))
#print("currentOST: " + str(currentOST))
hPersonnel = 0 # Set to zero again
emptyBecasueFarAwayOST = False
else: # Continue emptying until everything is emptied
#print("D")
emptyBecasueFarAwayOST = True
currentOST = (SinkCoordainte) # Current position
break # start iterating again
else: # Capacity not reach in truck
currentIsEmpty = True # If truck is not full after the whole tank was emptied, the OST is certainly empty. Then move to next OST
currentOST = (newWWTPCoordinates) # punkte last visited
# Return to depot a the very end of iteration
if positionInWWTP == len(punkte)-1:
#print("Final return to the depot")
# Search closest sink, empty tank
SinkCoordainte = getClosestSink(newWWTPCoordinates, sinks, df) # Drive from new to sink
dist = distanceCalc2dKmFactor(newWWTPCoordinates, SinkCoordainte, df)
travelTime = dist / float(travelSpeed)
totTravelTime += travelTime
hPersonnel += travelTime
totTravelDistance += dist
loadedOnTrucksOperation, totWorkingTime = emptyAtSink(loadedOnTrucksOperation, totWorkingTime, temptyTruck) # Empty Sink
# Drive from Sink to depot
dist = distanceCalc2dKmFactor(SinkCoordainte, initialDepot, df)
totTravelDistance += dist
travelTime = dist / float(travelSpeed)
totTravelTime += travelTime
#print("Add to visited")
positionInWWTP += 1
visitedWWTP.append(i[0])
#print("ZUWACHS: " + str(totTravelDistance - totTravelDistanceOld))
#print("===========================================================")
#print("STATS ROUTE")
#print("statistics_Ausnahme: " + str(statistics_Ausnahme))
#print("statistics_NrOfDEPOVISIT: " + str(statistics_NrOfDEPOVISIT))
#print("LEN PUNKTdddE: " + str(punkte))
return totTravelDistance, totTravelTime, totWorkingTime
def algorithmServiceTour(punkte, Depot, InitialDepot, tpmax, travelSpeed, df, tservice):
''' This function calculates the costs of travelling to an OST and performing tasks
punkte - OST in correct order
Depot - Depot
InitialDepot - InitialDepot
tpmax - maxium working time
sinks
travelSpeed
df
tservice - Service time at WWTP
Output:
totTravelDistance - Tot travel distance
totTravelTime - Tot travel time
totWorkingTime - tot working time
'''
positionInWWTP, totTravelDistance, totTravelTime, totWorkingTime, hPersonell = 0, 0, 0, 0, 0 # Parameters
for i in punkte:
newWWTPCoordinates = (i[1], i[2]) # Coordinates of WWTP to serve next
# Check if max work a day is reached. If yes, drive to depot and empty and return to new node the next day
hPersonell, totWorkingTime, totTravelTime, totTravelDistance, returnToDepot = checkServiceTimeConstraint(Depot, newWWTPCoordinates, InitialDepot, tpmax, hPersonell, totTravelTime, totTravelDistance, travelSpeed, df, totWorkingTime, tservice)
if returnToDepot == True:
Depot = InitialDepot
else:
Depot = (newWWTPCoordinates) # punkte last visited
# Return to depot a the very end of iteration
if positionInWWTP == len(punkte) - 1:
dist = distanceCalc2dKmFactor(newWWTPCoordinates, InitialDepot, df)
totTravelDistance += dist
travelTime = dist / float(travelSpeed)
totTravelTime += travelTime
positionInWWTP += 1
return totTravelDistance, totTravelTime, totWorkingTime
def clarkeAndWright(WWTPInput, InitialdepotOperation, df):
'''
Input:
WWTPInput - WWTPs to order
InitialdepotOperation - depot coordinates (x, y)
df - distance factor
Output:
WWTsavingListEW - WWTPs ordered along the Clarke & Wright savings route
'''
#print("start creating clakre & wright" + str(len(WWTPInput)))
#print("----")
WWTP = copy.deepcopy(WWTPInput)
ID_INITALDEPOT, ID_INITALDEPOT_orig = -1, -1
depotClarke = (ID_INITALDEPOT, InitialdepotOperation[0], InitialdepotOperation[1]) # Clark Depot
nrOfConnectionNodes, path, connectedNodes = len(WWTP), {}, []
distanceMatrix = calculateDistanceMatrix(WWTP, depotClarke, df) # Calculate distance matrix
#print("Distance Matrix is calculated: ")
#print("DEBOP: " + str(depotClarke))
savingList = calculateSavings(distanceMatrix, depotClarke, WWTP) # Calculate savings
#print("FF: " + str(savingList[:10]))
#print("Savingas are calculated: " + str(len(savingList)))
savingList, FROMNODE, TONODE = getMostSaving(savingList) # Connect initial triangle. Find node with most savings
dFROMNODE = distanceMatrix[depotClarke[0]][FROMNODE]
dTONODE = distanceMatrix[depotClarke[0]][TONODE]
DINBETWEEN = distanceMatrix[FROMNODE][TONODE]
path[depotClarke[0]] = {FROMNODE: dFROMNODE}
path[FROMNODE] = {TONODE: DINBETWEEN}
path[TONODE] = {depotClarke[0]: dTONODE}
# new and hopefully faster
savingList.sort()
savingList = savingList[::-1]
# Connect the rest of the points
if len(WWTPInput) > 2: # Otherwise only one option (we do not consider direction)
path = getMostSavingInPathDIRECT(nrOfConnectionNodes, savingList, path, distanceMatrix, depotClarke, connectedNodes) # Generates saving edge
WWTsavingListEW = []
new = 0
while len(WWTsavingListEW) != len(WWTPInput): #new != ID_INITALDEPOT_orig:
toID = path[ID_INITALDEPOT]
for e in toID:
new = e
break
if new == ID_INITALDEPOT_orig:
# Get wwtp
for e in WWTP:
if e[0] == ID_INITALDEPOT and ID_INITALDEPOT != ID_INITALDEPOT_orig:
WWTsavingListEW.append(e)
break
break
else:
# Get wwtp
for e in WWTP:
if e[0] == new and new != ID_INITALDEPOT_orig:
WWTsavingListEW.append(e)
break
ID_INITALDEPOT = new
return WWTsavingListEW
def calculateDistanceMatrix(WWTP, depot, df):
'''Calculate distance matrix. 2D straight'''
WWTP.insert(0, depot) # Add depot
distanceMatrix, cnt = {}, 0 # Distance matrix
for fromNode in WWTP:
cnt += 1
distanceMatrix[fromNode[0]] = {} # Add empty dictionary
# End of WWTP list reached (no further distance calculation possible)
if fromNode[0] == WWTP[-1][0]:
return distanceMatrix
for toNode in WWTP[cnt:]: # Calculate distances with djikstra between all pairs
distance = distanceCalc2dKmFactor((fromNode[1], fromNode[2]), (toNode[1], toNode[2]), df)
distanceMatrix[fromNode[0]][toNode[0]] = distance
def calculateSavings(distanceMatrix, depot, nodeList):
savingList, cnt = [], 1
for fromNode in nodeList:
if fromNode[0] == nodeList[-1][0]: # If last elements
return savingList
for ToNode in nodeList[cnt:]:
if fromNode[0] != ToNode[0] and fromNode[0] != depot[0]:
d_depotToSecondNode = distanceMatrix[depot[0]][fromNode[0]]
d_depotToFirstNode = distanceMatrix[depot[0]][ToNode[0]]
distancebetween = distanceMatrix[fromNode[0]][ToNode[0]]
savings = d_depotToFirstNode + d_depotToSecondNode - distancebetween
#print("---")
#print(d_depotToSecondNode)
#print(d_depotToFirstNode)
#print(distancebetween)
#print(savings)
savingList.append((savings, fromNode[0], ToNode[0], d_depotToFirstNode, d_depotToSecondNode, distancebetween))
cnt += 1
return savingList
def getMostSaving(savingList):
minDit = 0 # scrapdistance
zahler = -1
for i in savingList:
zahler += 1
if i[0] > minDit:
minDit, FROMNODE, TONODE = i[0], i[1], i[2] # shortest distance # the flow flows to this node. # the flow starts here and flows to fromnode
deletPosition = zahler
#print("M: " + str(minDit))
del savingList[deletPosition]
return savingList, FROMNODE, TONODE
def getMostSavingInPathDIRECT(nrOfConnectionNodes, savingList, P, distanceMatrix, depot, connectedIntermediatenodes):
initialCopysavingList = list(savingList)
depotCoordinate = depot[0]
#while nrOfConnectionNodes >= len(P):
#while nrOfConnectionNodes + 1 > len(P): # Added one because of depot
#while initialCopysavingList > 0:
#print("A: " + str(nrOfConnectionNodes))
while nrOfConnectionNodes + 1 != len(P):
#print("FORTSCHRITT: " + str(len(P)))
#print("len initi: " + str(len(initialCopysavingList)))
#print(len(P))
#print("..")
#print("SAVING LIST: " + str(savingList))
#if len(savingList) == 0:
if len(initialCopysavingList) == 0 or len(savingList) == 0:
print("ERROR with saving list: ")
prnt("........")
#print(nrOfConnectionNodes)
#print("===========================")
# Remove interconnecting connection from savingList
savingListNew = []
for i in initialCopysavingList:
if i[1] in P and i[2] in P:
_ = 0
else:
savingListNew.append(i)
#initialCopysavingList = copy.deepcopy(savingListNew)
initialCopysavingList = list(savingListNew)
#savingList = copy.deepcopy(initialCopysavingList)
savingList = list(initialCopysavingList)
#print("AFTER I: " + str(savingList))
# Iterate until all pairs have been checked or a new connection took place
ext = 1
deletPositionSavingList = -1
while len(savingList) > 0 and ext == 1:
deletPositionSavingList += 1
#print("savingList[0]: " + str(savingList[0]))
FROMNODE, TONODE = savingList[0][1], savingList[0][2] # Get shortest distance
deletPosition = 0
del savingList[deletPosition] # Remove connection
# get possible nodes where a connection is possible
for i in P[depotCoordinate]:
fromDepot = i
break
for i in P:
for e in P[i]:
if e == depotCoordinate:
toDepot = i
break
#print("Endpunkte: " + str(toDepot) + " " + str(fromDepot))
#print("FROMNODE: " + str(FROMNODE) + " " + str(TONODE))
#print("connectedIntermediatenodes: " + str(connectedIntermediatenodes))
#print("----------------------------------------------------------------")
# If connetion between WWTP already exists
if FROMNODE == toDepot and TONODE == fromDepot or FROMNODE == fromDepot and TONODE == toDepot: # If not connecting possible nodes which can be connected
del initialCopysavingList[deletPosition] # Remove connection
else:
# Test if end nodes are next to new shortest saving path nodes
if FROMNODE == fromDepot or TONODE == fromDepot or FROMNODE == toDepot or TONODE == toDepot:
if FROMNODE == fromDepot or TONODE == fromDepot:
if FROMNODE == fromDepot:
if TONODE in connectedIntermediatenodes: # and FROMNODE in connectedIntermediatenodes:
continue # Return to while # Select next coordinate pair
dist = distanceMatrix[depotCoordinate][TONODE] # find distances
dist2 = distanceMatrix[FROMNODE][TONODE] # find distances
P[depotCoordinate] = {TONODE: dist} # Change P
P[TONODE] = {FROMNODE: dist2} # Change P
connectedIntermediatenodes.append(fromDepot) # List with already connected nodes
else:
if FROMNODE in connectedIntermediatenodes: # and FROMNODE in connectedIntermediatenodes:
continue # Select next coordinate pair
dist = distanceMatrix[depotCoordinate][FROMNODE] # find distances
dist2 = distanceMatrix[FROMNODE][fromDepot] # find distances
P[depotCoordinate] = {FROMNODE: dist} # Change P
P[FROMNODE] = {fromDepot: dist2} # Change P
connectedIntermediatenodes.append(fromDepot) # List with already connected nodes
if FROMNODE == toDepot or TONODE == toDepot:
if toDepot == FROMNODE:
if TONODE in connectedIntermediatenodes: # and FROMNODE in connectedIntermediatenodes:
continue # Select next coordinate pair
dist = distanceMatrix[toDepot][TONODE] # find distances
dist2 = distanceMatrix[depotCoordinate][TONODE] # find distances
P[toDepot] = {TONODE: dist} # Change P
P[TONODE] = {depotCoordinate: dist2} # Change P
connectedIntermediatenodes.append(toDepot) # List with already connected nodes
else:
if FROMNODE in connectedIntermediatenodes: # and FROMNODE in connectedIntermediatenodes:
continue
dist = distanceMatrix[FROMNODE][toDepot] # find distances
dist2 = distanceMatrix[depotCoordinate][toDepot] # find distances
P[toDepot] = {FROMNODE: dist} # Change P
P[FROMNODE] = {depotCoordinate: dist2} # Change P
connectedIntermediatenodes.append(toDepot) # List with already connected nodes
del initialCopysavingList[deletPositionSavingList] # Remove connection
ext = 0
return P
def distanceCalc2dKm(p0, p1):
''' Calculate 2d Distance'''
distance = math.hypot(p0[0] - p1[0], p0[1] - p1[1])
km = (float(distance) / 1000)
return km
def averageNearestNeighborClustering(buildings, areaCircle):
'''
input:
buildings - Buildings
areaCircle - Area of circle
Output:
ANN - ANN
'''
A = areaCircle * 1000000 # [m2]
nrNodes = len(buildings)
# Calculate nearest feature for each point
SumOfNearestNeigbhour = 0
for b in buildings:
# Window Search
nearDi = 99999999999999 # Nearest Distance of a point
for e in buildings: # Search distance to closest point
dst = distanceCalc2dKm((b[1], b[2]), (e[1], e[2]))
if dst != 0 and dst < nearDi:
nearDi = dst
SumOfNearestNeigbhour += nearDi
SumOfNearestNeigbhour = SumOfNearestNeigbhour * 1000 # [m] Needed in Meters
De = 0.5 / ((nrNodes / float(A)) ** 0.5) #
Do = SumOfNearestNeigbhour / nrNodes # distance in m
ANN = float(Do) / float(De) # ANN
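# Clark-Evans nearest-neighbour index: De is the expected mean nearest-neighbour distance under
# complete spatial randomness (0.5 / sqrt(N / A)), Do the observed mean; ANN < 1 indicates
# clustering, ANN = 1 a random pattern and ANN > 1 dispersion.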
# print("===========================")
# print("buildings: " + str(len(buildings)))
# print("De: " + str(De))
# print("Do: " + str(Do))
# print("A: " + str(areaCircle))
# print("CLUSTERING: " + str(ANN))
return ANN
def checkIfIsToocloseToBuilding(removedPntList, minInterWWTPDistance, random_X, random_Y):
for f in removedPntList:
innerWindowX_max = f[0] + minInterWWTPDistance * 1000 # [m]
innerWindowX_min = f[0] - minInterWWTPDistance * 1000 # [m]
innerWindowY_max = f[1] + minInterWWTPDistance * 1000 # [m]
innerWindowY_min = f[1] - minInterWWTPDistance * 1000 # [m]
if random_X < innerWindowX_max and random_X > innerWindowX_min and random_Y < innerWindowY_max and random_Y > innerWindowY_min: # Correction: OR changed to AND
isCloseToBuilding = True
break
else:
isCloseToBuilding = False
return isCloseToBuilding
def generateSinks(extent, minInterSinkDistance, r, SinkDensity):
'''
# This function creates random sinks depending on the density
Input:
extent - Extent [[topLeftPoint], [topRightPoint]]
minInterSinkDistance - Minimum Distance between sinks
r - Radius of Circle
SinkDensity - Nr of Sinks per km2
return
sinkList - Sinks
areaCircle - Area of circle [km2]
'''
sinkList = []
TopLefXGlobal, TopLefYGlobal, BottomRightXGlobal, BottomRightYGlobal = extent[0][0], extent[1][1], extent[1][0], extent[0][1] # Get Coordinates of Extent
# Center of Circle
centerExtentX = TopLefXGlobal + ((BottomRightXGlobal - TopLefXGlobal) / 2)
centerExtentY = BottomRightYGlobal + ((TopLefYGlobal - BottomRightYGlobal) / 2)
areaCircle = math.pi * pow(r, 2) # Circle Area
nrOfOST = int(SinkDensity * areaCircle) # Number of Points
# Build a bounding box around the circle (extent centre +/- r)
TopLefXGlobal = centerExtentX - r * 1000
TopLefYGlobal = centerExtentY + r * 1000
BottomRightXGlobal = centerExtentX + r * 1000
BottomRightYGlobal = centerExtentY - r * 1000
# Create Random sinkList
while len(sinkList) < nrOfOST:
random_X = random.uniform(TopLefXGlobal, BottomRightXGlobal) # Select random X
random_Y = random.uniform(BottomRightYGlobal, TopLefYGlobal) # Selext random Y
randomPnt = [random_X, random_Y] # Random Point
distanceFromMiddleOfExtent = distanceCalc2dKm((centerExtentX, centerExtentY), (random_X, random_Y))
if distanceFromMiddleOfExtent < r:
# Check if minimum distance criteria is fulfilled
if len(sinkList) > 0:
dCrit = True
for i in sinkList:
distance = distanceCalc2dKm([random_X, random_Y], [i[0], i[1]])
if distance <= minInterSinkDistance:
dCrit = False
if dCrit == True:
sinkList.append(randomPnt)
else:
sinkList.append(randomPnt)
# If not a single sink was created, create sink at center
if len(sinkList) == 0:
sinkList = [[centerExtentX, centerExtentY]]
return sinkList, areaCircle
def checkIfDayReached(statistics_Ausnahme, statistics_NrOfDEPOVISIT, emptyBecasueFarAwayOST, currentOST, newWWTPCoordinates, Initialdepot, tpmax, loadedOnTrucks, sinks, hPersonell, totTravelTime, totTravelDistance, travelSpeed, totWorkingTime, temptyTruck, df, temptyOST):
''' THis function checks if max work a day is reached. IF yes, add distance to depot and return to new node'''
# Time for Depot to OST. Used to check if an exception is allowed
initialDepotToNewOST = distanceCalc2dKmFactor(Initialdepot, newWWTPCoordinates, df) # Distance from the depot to the new OST
timeInitialDepotToNewOST = initialDepotToNewOST / float(travelSpeed) # Travel
# Current OST to closest sink
SinkCoordainte = getClosestSink(currentOST, sinks, df) # Closest sink from current OST
distToSink = distanceCalc2dKmFactor(currentOST, SinkCoordainte, df) # Distance to closest sink from current OST
travelTimeCurrentOstToSINK = distToSink / float(travelSpeed) # Travel tim from current OST to closest sink
## All Distances needed for new OST
# from current Ost to new OST
distNewToOST = distanceCalc2dKmFactor(currentOST, newWWTPCoordinates, df) # Distance from current OST to new OST
timeToNewOST = distNewToOST / float(travelSpeed) # Time neede from current OST to new OST
# from new Ost to closest Deposit
SinkCoordainteNewOST = getClosestSink(newWWTPCoordinates, sinks, df) # Closest sink from new OST
distToSinkNewOST = distanceCalc2dKmFactor(newWWTPCoordinates, SinkCoordainteNewOST, df) # Distance to closest sink from new OST
travelTimeNewOST = distToSinkNewOST / travelSpeed # Travel tim from current OST to new sink
# from closest sink of new OST to Depot
distSinkToDepot = distanceCalc2dKmFactor(SinkCoordainteNewOST, Initialdepot, df) # Distance from current WWTP to depot
travelTimeNewSinkToDepot = distSinkToDepot / float(travelSpeed)
# Time from current OST
totTimeNeeded = timeToNewOST + temptyOST + travelTimeNewOST + travelTimeNewSinkToDepot + temptyTruck # from current to new OST + emptying time + time to closest sink + time from closest sink to depot
# Theoretical time, used to test for the exception
totTimeNeededTheoretical = timeInitialDepotToNewOST + temptyOST + travelTimeNewOST + travelTimeNewSinkToDepot + temptyTruck # from the depot to the new OST + emptying time + time to closest sink + time from closest sink to depot
#print("THEORETICAL TIME: " + str(totTimeNeededTheoretical))
#print(statistics_Ausnahme)
### If the overtime from visiting an exceptionally far away node from the current situation is smaller than
### the overtime in case we would start at the depot, visit the exceptionally far away node.
### Otherwise return to the depot.
##print("theoreticalOverTimeIfStartedFromDepot: " + str(totTimeNeededTheoretical))
##print("OverTimeIfStartedFromCurrentWWTP: " + str(totTimeNeeded))
##print(Initialdepot)
##print(newWWTPCoordinates)
# In case a single OST is too far away to serve one day, make an exception
if totTimeNeededTheoretical > tpmax or emptyBecasueFarAwayOST == True:
statistics_Ausnahme += 1
#print("Emptying far away plant from current situation which results in heavy overtime " + str(tpmax))
#print(totTimeNeededTheoretical)
#print("travelSpeed: " + str(travelSpeed))
#print("timeInitialDepotToNewOST " + str(timeInitialDepotToNewOST))
#print("temptyOST " + str(temptyOST))
#print("travelTimeNewOST " + str(travelTimeNewOST))
#print("travelTimeNewSinkToDepot " + str(travelTimeNewSinkToDepot))
#print("temptyTruck " + str(temptyTruck))
returnedToDepot = False # Only return to depot after farAwaysOST is emptied
farAwayOST = True
return farAwayOST, hPersonell, totTravelTime, totTravelDistance, loadedOnTrucks, totWorkingTime, returnedToDepot, statistics_NrOfDEPOVISIT, statistics_Ausnahme
# Is more time efficient to visit from current situtation than from depot
# Check if emptying is possible! As the w WWTP could not be reached make a tour via depot
if totTimeNeeded + hPersonell >= tpmax: # too much time is used
#print("Emptying not possible" + str(totTimeNeeded + hPersonell))
# Search closest sink
totTravelTime += travelTimeCurrentOstToSINK
totTravelDistance += distToSink
# Set load on trucks to zero and add emptying truck to working time
loadedOnTrucks, totWorkingTime = emptyAtSink(loadedOnTrucks, totWorkingTime, temptyTruck) # Empty Sink
# Travel from Sink to Starting point
distToDepot = distanceCalc2dKmFactor(SinkCoordainte, Initialdepot, df) # Distance from current WWTP to depot
travelTime = distToDepot / float(travelSpeed)
totTravelTime += travelTime
totTravelDistance += distToDepot
#print("Abweichung von h max: " + str(tpmax-(hPersonell+ travelTimeCurrentOstToSINK + temptyTruck + travelTime + temptyTruck)))
hPersonell = 0 # New day is started for personel
returnedToDepot = True # Return to Depot
statistics_NrOfDEPOVISIT += 1
farAwayOST = False
return farAwayOST, hPersonell, totTravelTime, totTravelDistance, loadedOnTrucks, totWorkingTime, returnedToDepot, statistics_NrOfDEPOVISIT, statistics_Ausnahme#, ausnahme
else:
#print("Emptying possible")
returnedToDepot = False
farAwayOST = False
return farAwayOST, hPersonell, totTravelTime, totTravelDistance, loadedOnTrucks, totWorkingTime, returnedToDepot, statistics_NrOfDEPOVISIT, statistics_Ausnahme#, ausnahme
def checkServiceTimeConstraint(currentOST, newWWTPCoordinates, Initialdepot, tpmax, hPersonell, totTravelTime, totTravelDistance, travelSpeed, df, totWorkingTime, tservice):
''' This function checks whether the maximum working time per day is reached. If yes, add the distance to the depot and return to the new node the next day'''
# From current OST to next OST
distToNewost = distanceCalc2dKmFactor(currentOST, newWWTPCoordinates, df) # Distance from former position to new position
travelTimeTOnewOST = distToNewost / float(travelSpeed) # Travel Time
## All Distances needed for new OST
# From current Ost to new OST
distNewToOST = distanceCalc2dKmFactor(currentOST, newWWTPCoordinates, df) # Distance from current OST to new OST
timeToNewOST = distNewToOST / float(travelSpeed) # Time neede from current OST to new OST
# from next OST to Depot
distNewOstToDepot = distanceCalc2dKmFactor(newWWTPCoordinates, Initialdepot, df) # Distance from current WWTP to depot
travelTimeNewOstToDepot = distNewOstToDepot / float(travelSpeed)
# Total potential needed time
totTimeNeeded = timeToNewOST + tservice + travelTimeNewOstToDepot # Time to drive to new OST + time spent at OST + time spent for driving to Depot
# In case a single OST is too far away to serve one day, make an exception
if totTimeNeeded > tpmax:
returnToDepot = False
return hPersonell, totWorkingTime, totTravelTime, totTravelDistance, returnToDepot
# If max working time would be reached with new OST, return to Depot
if hPersonell + totTimeNeeded >= tpmax:
distToDepot = distanceCalc2dKmFactor(currentOST, Initialdepot, df) # Distance from current WWTP to depot
dist = distToDepot #+ distFromDepotToNewNode
travelTime = dist / float(travelSpeed)
totTravelDistance += dist
totTravelTime += travelTime
hPersonell = 0 # New day is started for personel
returnToDepot = True
return hPersonell, totWorkingTime, totTravelTime, totTravelDistance, returnToDepot
else:
hPersonell += timeToNewOST + tservice
# Go to next WWTP and perform service
totTravelDistance += distToNewost
totTravelTime += travelTimeTOnewOST
totWorkingTime += tservice
returnToDepot = False
return hPersonell, totWorkingTime, totTravelTime, totTravelDistance, returnToDepot
def getCircleExtent(extent, r):
'''
Compute the square bounding-box extent of a circle with radius r centred in the given extent.
Input:
extent - [[topLeftPoint], [topRightPoint]]
r - radius [km]
'''
randomPnts = []
TopLefXGlobal, TopLefYGlobal, BottomRightXGlobal, BottomRightYGlobal = extent[0][0], extent[1][1], extent[1][0], extent[0][1] # Get Coordinates of Extent
centerExtentX = TopLefXGlobal + ((BottomRightXGlobal - TopLefXGlobal) / 2)
centerExtentY = BottomRightYGlobal + ((TopLefYGlobal - BottomRightYGlobal) / 2)
# Build a bounding box around the circle (extent centre +/- r)
TopLefXGlobal = centerExtentX - (r * 1000)
TopLefYGlobal = centerExtentY + (r * 1000)
BottomRightXGlobal = centerExtentX + (r * 1000)
BottomRightYGlobal = centerExtentY - (r * 1000)
circleExtent = [(TopLefXGlobal, BottomRightYGlobal), (BottomRightXGlobal, TopLefYGlobal)]
return circleExtent
def createRandomDisributionCircle(mindistBetweenWWTPs, extent, nrOfOST, r):
'''
Create random distribution within a circle within the extent for a given number of OST.
Input:
mindistBetweenWWTPs - minimum distance between OST
extent - [[topLeftPoint], [topRightPoint]]
nrOfOST - Nr of OST
r - radius
'''
randomPnts = []
TopLefXGlobal, TopLefYGlobal, BottomRightXGlobal, BottomRightYGlobal = extent[0][0], extent[1][1], extent[1][0], extent[0][1] # Get Coordinates of Extent
centerExtentX = TopLefXGlobal + ((BottomRightXGlobal - TopLefXGlobal) / 2)
centerExtentY = BottomRightYGlobal + ((TopLefYGlobal - BottomRightYGlobal) / 2)
# Build a bounding box around the circle (extent centre +/- r)
TopLefXGlobal = centerExtentX - (r * 1000)
TopLefYGlobal = centerExtentY + (r * 1000)
BottomRightXGlobal = centerExtentX + (r * 1000)
BottomRightYGlobal = centerExtentY - (r * 1000)
circleExtent = [(TopLefXGlobal, BottomRightYGlobal), (BottomRightXGlobal, TopLefYGlobal)]
# Create Random randomPnts
randomID = 0
while len(randomPnts) < nrOfOST:
random_X = random.uniform(TopLefXGlobal, BottomRightXGlobal) # Select random X
random_Y = random.uniform(BottomRightYGlobal, TopLefYGlobal) # Selext random Y
randomPnt = [randomID, random_X, random_Y] # Points
distanceFromMiddleOfExtent = distanceCalc2dKm((centerExtentX, centerExtentY), (random_X, random_Y))
if distanceFromMiddleOfExtent < r:
# Check if minimum distance criteria is fulfilled
if len(randomPnts) > 0:
dCrit = True
for i in randomPnts:
#distance = math.hypot(random_X - i[0], random_Y - i[1])
distance = distanceCalc2dKm([random_X, random_Y], [i[1], i[2]]) # in km
if distance <= mindistBetweenWWTPs:
dCrit = False
if dCrit == True:
randomPnts.append(randomPnt)
randomID += 1
else:
randomPnts.append(randomPnt)
else:
_ = 0 #print("too far away")
return randomPnts, circleExtent
def reDistributeWWTPs(oldWWTP, extent, minDistBetwenWWTP, r):
'''
Input:
oldWWTP - existing WWTPs to redistribute
extent - [[topLeftPoint], [topRightPoint]]
minDistBetwenWWTP - minimum allowed distance between any two WWTPs [km]
r - circle radius [km]
'''
#print("start redistribution")
newWWTP = []
TopLefXGlobal, TopLefYGlobal, BottomRightXGlobal, BottomRightYGlobal = extent[0][0], extent[1][1], extent[1][0], extent[0][1] # Get Coordinates of Extent
centerExtentX = TopLefXGlobal + ((BottomRightXGlobal - TopLefXGlobal) / 2)
centerExtentY = BottomRightYGlobal + ((TopLefYGlobal - BottomRightYGlobal) / 2)
# Build a bounding box around the circle (extent centre +/- r)
TopLefXGlobal = centerExtentX - r * 1000 # in [m]
TopLefYGlobal = centerExtentY + r * 1000
BottomRightXGlobal = centerExtentX + r * 1000
BottomRightYGlobal = centerExtentY - r * 1000
circleExtent = [(TopLefXGlobal, BottomRightYGlobal), (BottomRightXGlobal, TopLefYGlobal)]
# Create Random newWWTP
pos = 0
while len(newWWTP) < len(oldWWTP):
random_X = random.uniform(TopLefXGlobal, BottomRightXGlobal) # Select random X
random_Y = random.uniform(BottomRightYGlobal, TopLefYGlobal) # Selext random Y
distanceFromMiddleOfExtent = distanceCalc2dKm((centerExtentX, centerExtentY), (random_X, random_Y))
if distanceFromMiddleOfExtent < r:
# Check if minimum distance criteria is fulfilled
if len(newWWTP) > 0:
dCrit = True
for i in newWWTP:
distance = distanceCalc2dKm([random_X, random_Y], [i[1], i[2]]) # in [km]
if distance <= minDistBetwenWWTP:
dCrit = False
if dCrit == True:
oldEntry = oldWWTP[pos]
oldEntry[1] = random_X
oldEntry[2] = random_Y
newWWTP.append(oldEntry)
pos += 1
else:
oldEntry = oldWWTP[pos]
oldEntry[1] = random_X
oldEntry[2] = random_Y
newWWTP.append(oldEntry)
pos += 1
return newWWTP, circleExtent
def clusterNN(WWTP, kreisFlache, extent, minInterWWTPDistance, NNATarget, WindowToPlaceRandomNode, NeighbourHoodDensityDistance, NrOfPointsToSelectAndPlaceNewNode):
'''
Input:
WWTP - WWTPs to cluster
kreisFlache - circle area [km2]
extent - extent of the circle
minInterWWTPDistance - minimum distance criterion; the closest two points allowed in the initial configuration
NNATarget - target average nearest neighbour (ANN) value
WindowToPlaceRandomNode - how far away from the selected node a random node may be placed
NeighbourHoodDensityDistance - radius within which the density is measured to determine the aggregation point
NrOfPointsToSelectAndPlaceNewNode - how many candidate nodes are placed in the inner window per attempt
'''
checked = []
TopLefXGlobal, TopLefYGlobal, BottomRightXGlobal, BottomRightYGlobal = extent[0][0], extent[1][1], extent[1][0], extent[0][1] # Get Coordinates of Extent
randID = len(WWTP) + 1000
# ------------------------------------------------------------------------------------------------------------------
# After having created random distributed list with coordinates, iterate until NNA is fulfilled,
# ------------------------------------------------------------------------------------------------------------------
#print("INITIAL Circle Area: " + str(kreisFlache))
clusterDegree = averageNearestNeighborClustering(WWTP, kreisFlache) # Calculate NNA
#print("CClusterDegree: " + str(clusterDegree))
#print("NNATarget: " + str(NNATarget))
if clusterDegree < NNATarget:
print("The clustering degree is already smaller than the targeted nearest neighbour")
#print("clusterDegree: " + str(clusterDegree))
#print("NNATarget: " + str(NNATarget))
return [] # WWTP
else:
listWithNodesTooDense = []
while round(clusterDegree, 2) > NNATarget:
#print("Current NNA-Value: " + str(clusterDegree))
#print("Nr of checked: " + str(len(checked)))
#print("Dense pnts: " + str(len(listWithNodesTooDense)))
#print("----------------------------------------------")
nextPnt, returnToStart = False, 0
PntsCopy = list(WWTP)
# Find furthest away node
globalAvarageNNtoAll = 0
longestSum = 0 # Nearest Distance
for b in PntsCopy:
localSum = 0
for e in PntsCopy: # Search distance to closest point
dst = distanceCalc2dKm((b[1], b[2]), (e[1], e[2]))
localSum += dst
globalAvarageNNtoAll += localSum
if localSum > longestSum and b[0] not in checked:
longestSum = localSum
XID_fartherstAway = b
if len(checked) == len(WWTP):
#print("ERROR: Could not find a distribution")
return []
if returnToStart == 0:
removedPntList = removePnt(PntsCopy, XID_fartherstAway[0]) # Remove Point from current pnts
# -------------------------------
# Get random pnt based on density
# -------------------------------
tooClose = True
probabilityList = getNrOfNeighboursClustering(PntsCopy, NeighbourHoodDensityDistance) # Sort list according to number of neighbours
# Select random Point and define close range for selection new pnt
while nextPnt == False and returnToStart == 0:
while tooClose == True and returnToStart == 0:
probForDensitSelection = dict(probabilityList)
if len(probForDensitSelection) == 0:
print("ERROR: densitiyNeighbourhoodToConsiderPacement is too high. No point exists with so many neighbours")
checked.append(XID_fartherstAway[0])
returnToStart = 1
break
# Select node with highest density
highestDensit = 0
for z in probForDensitSelection:
if probForDensitSelection[z] > highestDensit:
ranID = z
highestDensit = probForDensitSelection[z]
# Get pnt coordinate
for i in PntsCopy:
if i[0] == ranID:
windowPnt = i
break
# Remove from probability list
scrapCopyList = {}
for i in probabilityList:
if i != ranID and i not in listWithNodesTooDense:
scrapCopyList[i] = probabilityList[i]
probabilityList = dict(scrapCopyList)
# ---------------------------
# Select random pnt in window
# ---------------------------
#print("WindowToPlaceRandomNode: " + str(WindowToPlaceRandomNode))
randomNodeSelection = 0
TopLefX = windowPnt[1] - WindowToPlaceRandomNode * 1000 # [m]
BottomRightX = windowPnt[1] + WindowToPlaceRandomNode * 1000 # [m]
BottomRightY = windowPnt[2] - WindowToPlaceRandomNode * 1000 # [m]
TopLefY = windowPnt[2] + WindowToPlaceRandomNode * 1000 # [m]
while randomNodeSelection < NrOfPointsToSelectAndPlaceNewNode and returnToStart != 1: # and selectRandomNode == True:
random_X, random_Y = random.uniform(TopLefX, BottomRightX), random.uniform(BottomRightY, TopLefY)
randID = XID_fartherstAway[0] #
randomNEWPnt = XID_fartherstAway
randomNEWPnt[0] = randID
randomNEWPnt[1] = random_X
randomNEWPnt[2] = random_Y
randID += 1
if random_X < TopLefXGlobal or random_X > BottomRightXGlobal or random_Y < BottomRightYGlobal or random_Y > TopLefYGlobal:
#print("Out of Extent")
_ = 0
else:
# -------------------------------------
# Check that the random node is not too close to any node
# -------------------------------------
isCloseToBuilding = checkIfIsToocloseToBuilding(removedPntList, minInterWWTPDistance, random_X, random_Y)
if isCloseToBuilding == True: # If too close
randomNodeSelection += 1
else:
scrapCopy = list(removedPntList)
scrapCopy.append(randomNEWPnt)
# takes a lot of time
ANNDistanceNew = averageNearestNeighborClustering(scrapCopy, kreisFlache)
localGlobalNNAtoAll = 0
longestSum = 0
for b in scrapCopy:
localSum = 0
for e in scrapCopy: # Search distance to closest point
dst = distanceCalc2dKm((b[0], b[1]), (e[0], e[1]))
localSum += dst
localGlobalNNAtoAll += localSum
# accept the candidate if the ANN decreased or the summed inter-point distance decreased
if ANNDistanceNew < clusterDegree or globalAvarageNNtoAll > localGlobalNNAtoAll:
removedPntList.append(randomNEWPnt)
WWTP = list(removedPntList)
clusterDegree = ANNDistanceNew
nextPnt = True
checked = []
returnToStart = 1
else:
print("IMPROVEMENT IS NOT FOUND A")
randomNodeSelection += 1
# Speed-up: keep track of nodes that are already too densified
_scrap = NrOfPointsToSelectAndPlaceNewNode # - 1
# This check only kicks in later in the algorithm because, with the inter-house distance enforced, the neighbourhood becomes too dense early on
if randomNodeSelection == _scrap and returnToStart != 1 and ranID not in listWithNodesTooDense:
listWithNodesTooDense.append(ranID)
returnToStart = 1
#print("finished clustering: " + str(clusterDegree))
return WWTP
def getNrOfNeighbours(PntsCopy, bufferNeighbour):
probabilityList = {}
for b in PntsCopy:
nrOfNeighborsinRadius = 0
for e in PntsCopy: # Search distance to closest point
dst = distanceCalc2dKm((b[0], b[1]), (e[0], e[1]))
if dst <= bufferNeighbour:
nrOfNeighborsinRadius += 1
probabilityList[b[0]] = nrOfNeighborsinRadius
return probabilityList
def getNrOfNeighboursClustering(PntsCopy, bufferNeighbour):
probabilityList = {}
for b in PntsCopy:
nrOfNeighborsinRadius = 0
for e in PntsCopy: # Search distance to closest point
dst = distanceCalc2dKm((b[1], b[2]), (e[1], e[2]))
if dst <= bufferNeighbour:
nrOfNeighborsinRadius += 1
probabilityList[b[0]] = nrOfNeighborsinRadius
return probabilityList
def getNearestWWTP(WWTP, depot):
distInitial = 99999999999
for i in WWTP:
currDist = distanceCalc2dKm(depot, (i[0], i[1]))
if currDist < distInitial:
distInitial = currDist
closestWWTP = (i[0], i[1])
# Del WWTP
copyWWTP = []
for i in WWTP:
if i[0] != closestWWTP[0] and i[1] != closestWWTP[1]:
copyWWTP.append(i)
WWTP = copyWWTP
return WWTP, closestWWTP
def calculateFailureProbability(linareFailureRatePerYear, daysSinceControl, randomAge, dayYear):
failureRateYear = linareFailureRatePerYear
# failuerRateDay = 1-0.999707 # 0.9=x^360
failuerRateDay = 1 - math.e ** (math.log(failureRateYear) / dayYear)
# Straightforward linear approximation (overrides the exponential formulation above)
failuerRateDay = linareFailureRatePerYear / dayYear
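# Example (assumed values): linareFailureRatePerYear = 0.05 and dayYear = 365 give
# failuerRateDay = 0.05 / 365, i.e. roughly 1.37e-4 per day.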
return failureRateYear, failuerRateDay
def addDailyFlow(OST, sludgeAndScumPerPE):
''' This function adds the daily flow of a wwtp by multiplying the dimension of a wwtp with average production'''
for i in OST:
i[8] += sludgeAndScumPerPE * i[4]
return OST
def dimensionWWTP(OST, minWWTPSize, maxWWTPSize, tankSizeINYears, sludgeAndScumPerPE, dayYear, NrYears, givenDistribution, FullFill):
'''This function assigns a random size and filling status'''
wwtpID = 0
newWWTP = []
for i in OST:
wwtpID += 1 # ID
Xcor, Ycor, randomZ = i[1], i[2], 100
if givenDistribution == True:
randomSize = i[4]
else:
randomSize = random.randint(minWWTPSize, maxWWTPSize) # [EW]
# Tank Size is max for one year
randomAge = random.randint(0, 30) # [year]
#daysSinceControl = random.randint(0, NrYears * dayYear) # [day]
daysLastSmartRoute = random.randint(0, dayYear) # [day] # Once a year
if FullFill == False:
randomFill = random.uniform(0, randomSize) # [EW]
randomTankFilling = dayYear * tankSizeINYears * sludgeAndScumPerPE * randomFill
# X, Y, Z, Size, Age, Days since control, working probability, working/YesOrNO, current filling status, days since last smartroute
newWWTP.append([wwtpID, Xcor, Ycor, randomZ, randomSize, randomAge, 0, True, randomTankFilling, daysLastSmartRoute])
else:
fullTank = dayYear * tankSizeINYears * sludgeAndScumPerPE * randomSize
z = [wwtpID, Xcor, Ycor, randomZ, randomSize, randomAge, 0, True, fullTank, daysLastSmartRoute]
newWWTP.append(z)
return newWWTP
def calcAverageOSTSize(WWTP):
'''Calculate average WWTP Size in PE.'''
avSizeWWWTP, totalPE = 0, 0
for i in WWTP:
avSizeWWWTP += i[4]
totalPE += i[4]
avSizeWWWTP = float(avSizeWWWTP) / len(WWTP)
return avSizeWWWTP, totalPE
def getTotalSludgeVolume(totalPE, sludgeAndScumPerPE, dayYear):
'''Calculate average WWTP Size in PE.'''
totSludgeScumVolume = totalPE * sludgeAndScumPerPE * dayYear # Total Sludge and Scum volume
return totSludgeScumVolume
def checkWWTPtoEmpty(OST, dayYear, tankSizeINYears, sludgeAndScumPerPE, tankFillPeopleCall):
''' Check if there is a WWTP to empty. The criterion decides which tanks need emptying
Input
WWTP - List with WWTP
dayYear - Nr of days a year
tankSizeINYears - Tank size in years
sludgeAndScumPerPE - accumulation rate per PE
tankFillPeopleCall - Time when people call
Output:
WWTPstoEmpty - All WWTPs which need to be emptied
OST - Updated WWTP
'''
OSTCopy = copy.deepcopy(OST)
WWTPstoEmpty = []
digitsToRound = 3
for i in OSTCopy:
totalTankCapacity = i[4] * dayYear * tankSizeINYears * sludgeAndScumPerPE # Calc total tank capacity
if i[8] >= round((totalTankCapacity * tankFillPeopleCall), digitsToRound): # If the tank content is larger than the fill level at which people call
crittanksIsFull = True
else:
crittanksIsFull = False
i[7] = crittanksIsFull
if crittanksIsFull == True:
WWTPstoEmpty.append(i)
for i in WWTPstoEmpty:
for f in OST:
if i[0] == f[0]:
f[8] = 0
return WWTPstoEmpty, OST
def assignFailures(WWTP, linareFailureRatePerYear, dayPerYear):
''' Define which WWTP have a failure'''
nrFailes = 0
for i in WWTP:
_, failureRateDay = calculateFailureProbability(linareFailureRatePerYear, 1, 1, dayPerYear) # Test if works or not
randomNr = random.uniform(0, 1)
if randomNr >= failureRateDay:
failOrWork = True
else:
failOrWork = False
nrFailes += 1
i[7] = failOrWork
failedWWTP = getAllFailures(WWTP) # Get all WWTP which failed
return WWTP, nrFailes, failedWWTP
'''
# In case the sludge needs to be pumped in case of failure. Insert the flow at top position
FlowFailout = 0
for i in WWTP:
if i[7] == False:
FlowFailout += i[8]
WWTPstoEmpty.insert(0, i) # Priorise and put to first place
'''
def checkIfthereIsSpaceForAnotherWWTP(currentLoad, cltruck, i):
'''
Check whether the next WWTP to be emptied would still have space in the truck
WWTP - List with OST
currentLoad - Current Load
cltruck - Truck load capacity
'''
capacityReached = 0
potentiallyNewLoad = currentLoad # Potential new load
stillEmptyCapacity = cltruck - currentLoad # Potential empty space
potentiallyNewLoad += i[8] # Assumed complete tank is loaded
if potentiallyNewLoad >= cltruck: # IF more on truck
capacityReached = 1 # Max Tank is reached
return capacityReached, stillEmptyCapacity
def getClosestSink(currentPosition, sinks, df):
shortestDist = 999999999
for i in sinks:
dist = distanceCalc2dKmFactor((currentPosition[0], currentPosition[1]), (i[0], i[1]), df)
if dist < shortestDist:
shortestDist = dist
closestDepot = (i[0], i[1])
return closestDepot
def emptyAtSink(loadedOnTrucks, totWorkTime, avTimeToEmptyAtSink):
''' Empty Truck at WWTP. Put load on truck, add time needed'''
loadedOnTrucks = 0
totWorkTime += avTimeToEmptyAtSink # Could be more complex calculation
return loadedOnTrucks, totWorkTime
def getClosestWWTP(WWTP, coordinates):
''' This function searches the closest WWTP and deletes it from the list
Input:
WWTP - List with WWTP
coordinates - Coordinate for which the closest WWTP should be found
Output:
WWTsavingListew - List with the closest WWTP removed
closestWWTP - Closest WWTP
'''
shortestDist = 999999999999
WWTsavingListew = []
for i in WWTP:
dist = distanceCalc2dKm((i[1], i[2]), (coordinates[1], coordinates[2]))
if dist < shortestDist:
shortestDist = dist
closestWWTP = i
for i in WWTP:
if i[1] != closestWWTP[1] and i[2] != closestWWTP[2]:
WWTsavingListew.append(i)
return WWTsavingListew, closestWWTP
def createRoute(OST, depotCord):
''' Search the closest WWTP from the depot, then repeatedly take the next closest until all are visited. Store this order in WWTP
OST - list with WWTP
depotCord - Depot
Output:
visitedWWTP - rearranged WWTPs in List according to a nearest neighbour path
'''
InitialID = -1
Depot = (InitialID, depotCord[0], depotCord[1])
#print("start creating route...")
if len(OST) == 1:
return OST
visitedWWTP = []
# Get closest
while len(OST) >= 1:
OST, closest = getClosestWWTP(OST, Depot)
visitedWWTP.append(closest)
Depot = closest
return visitedWWTP
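# --- Illustrative sketch (not part of the original script) -----------------
# createRoute() orders the plants greedily: always jump to the nearest
# not-yet-visited WWTP, starting from the depot. The standalone snippet below
# reproduces that idea on made-up (id, x, y) points so the resulting visit
# order can be inspected without the rest of the module.
import math

def _demo_nearest_neighbour_route(points, depot_xy):
    remaining = list(points)
    route = []
    cx, cy = depot_xy
    while remaining:
        nxt = min(remaining, key=lambda p: math.hypot(p[1] - cx, p[2] - cy))
        route.append(nxt)
        remaining.remove(nxt)
        cx, cy = nxt[1], nxt[2]
    return route
# _demo_nearest_neighbour_route([(1, 4, 0), (2, 1, 1), (3, 2, 2)], (0, 0))
# visits the points in the order 2 -> 3 -> 1.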
def getAllFailures(WWTP):
failedWWTP = []
for i in WWTP:
if i[7] == False: # found failure
failedWWTP.append(i)
return failedWWTP
def repairWWTP(WWTP, coordinates):
''' Repair WWT'''
for e in WWTP:
if e[1] == coordinates[0] and e[2] == coordinates[1]:
e[7] = True # Repaired
break
return WWTP
def calculateRadius(prozentDistance, tpmax, travelSpeed, maxWWTPSize, cltruck, dayYear, sludgeAndScumPerPE, df):
''' Calculates the radius and returns the distance in km
Input:
prozentDistance - Percent of max radius
'''
# timesToEmpty = (float(maxWWTPSize)*dayYear*sludgeAndScumPerPE)/float(cltruck)
# print("Times To Empty: " + str(timesToEmpty))
r = ((tpmax / 2) * travelSpeed) / df # / timesToEmpty # Because there and back again --> divide by 2
distanceKm = r * prozentDistance
return distanceKm
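# --- Illustrative sketch (not part of the original script) -----------------
# Worked example for calculateRadius(): with a hypothetical maximum tour time
# of 8 h, a travel speed of 30 km/h and a street-detour factor df of 1.4, the
# maximum reachable radius is ((8 / 2) * 30) / 1.4 ~= 85.7 km, and requesting
# 50 % of that maximum gives ~42.9 km. All argument values are made up.
def _demo_calculate_radius():
    return calculateRadius(prozentDistance=0.5, tpmax=8.0, travelSpeed=30.0,
                           maxWWTPSize=1000, cltruck=10.0, dayYear=365,
                           sludgeAndScumPerPE=0.005, df=1.4)
# _demo_calculate_radius() -> ~42.86 km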
def emptyWWTP(OST, newOST, stillEmptyCapacity, currentLoad, temptyOST, totWorkingTime, currentWorkingHours):
'''Go to the new WWTP and load as much as possible until the vehicle is filled.
Input:
OST - List with WWTP
newOST - New OST coordinates
stillEmptyCapacity - Possible load capacity which still fits the truck
currentLoad - Current load
temptyOST - Time needed to empty truck
currentWorkingHours - How many hours already worked
'''
currentWorkingHours += temptyOST
totWorkingTime += temptyOST
for i in OST:
if i[1] == newOST[0] and i[2] == newOST[1]:
currentTank = i[8]
newFill = currentTank - stillEmptyCapacity
if newFill > 0: # Partial Emptying
i[8] = newFill
currentLoad += stillEmptyCapacity # Truck is full
else: # Full emptying
i[8] = 0
currentLoad += currentTank
return OST, currentLoad, currentWorkingHours, totWorkingTime
def storeOSTDistribution(WWTP):
''' This function stores the size-distribution of the WWTPs'''
OSTDistribution = list(WWTP)
return OSTDistribution
def checkIfCurrentWWTPIsEmpty(WWTP, i):
''' This function checks if a current OST is empty
Input:
WWTP - List with WWTP
i - WWTP to check
Output:
currentIsEmpty - Criterion whether empty or not
'''
for b in WWTP:
if i[1] == b[1] and i[2] == b[2]:
if i[8] == 0:
currentIsEmpty = True
else:
#print("CURRENT LOAD: " + str(i[8]))
currentIsEmpty = False
return currentIsEmpty
def calcYearlyAverage(resultList, yearResults, NrYears, r, circleArea, densityPerKm2, PEperKm2):
''' This function calculates the average over many years
resultList - List to store results
yearResults - List with results per year
nrYears - Number of years
r - radius
circleArea - circle area
densityPerKm2 - Density per km
PEperKm2 - Density per PE
'''
a, b = [],[]
#c = []
#for iteratio in resultOperationCall:
for iteratio in yearResults:
a.append(iteratio[0])
b.append(iteratio[1])
#c.append(iteratio[2]) # Scrap
costs = sum(a) / float(NrYears)
time = sum(b) / float(NrYears)
z = [r, circleArea, densityPerKm2, PEperKm2, costs, time, 0] # Average cost of all iterations
resultList.append(z)
return resultList
def shrinkRadiusFunction(shrinkRadius, r, currIteration, IterationsToAchieve, Rmin, Rshrink):
'''
This function shrinks the radius.
Input:
shrinkRadius - True: Shrink takes place
r - current radius
currIteration - Current Iteration
IterationsToAchieve - Iterations to achieve
Rmin - Minimum Radius
Rshrink - How much the radius gets shrunk
Output:
r - New Radius
currIteration - Current Iteration
'''
if shrinkRadius == True:
if r == Rmin and currIteration == IterationsToAchieve: # If current radius is minimum radius and number of iterations achieved exit definitely
r, currIteration = 0, 99999999999
else:
if currIteration == IterationsToAchieve: # If number of iterations achieved, shrink radius
r -= Rshrink # Decrease Radius
currIteration = 999999999999999
if r < Rmin: # Set to minium radius
r = Rmin # define as minimum
currIteration = 999999999999999
return r, currIteration
def convertToCosts(totTravelDistance, totTravelTime, totWorkingTime, laborCostSludgeH, fixedCostKM, crentH, totalPE):
''' Convert to Costs
Input:
totTravelDistance - Tot travel distance
totTravelTime - Tot travel time
totWorkingTime - Tot labor time
laborCostSludgeH - Labor costs per h
fixedCostKM - Fixed costs per km
crentH - Truck rent per hour
totalPE - Total population equivalents
Output:
totTimePE, totCostsPE
'''
# Costs Personell
costPersonellTravelTime = totTravelTime * laborCostSludgeH # Cost of driving time for personnel
costWorkingTime = totWorkingTime * laborCostSludgeH # Cost of of working time personnel
# Cost Vehicle
costTruckDriveTime = totTravelTime * crentH # Variable Costs Truck Renting
costTruckRent = totWorkingTime * crentH # Fixed Cost Truck Renting (really without driving time?)
totalCostDistanceKMSludge = fixedCostKM * totTravelDistance # Travel Cost (distance method)
#print("Anzaghl Stunden gemietet ROUTE: " + str(totWorkingTime + totTravelTime))
#print("Kosten Stunden gemietet ROUTE: " + str(costTruckRent + costTruckDriveTime))
# Sum Costs
totCosts = costPersonellTravelTime + totalCostDistanceKMSludge + costTruckRent + costTruckDriveTime + costWorkingTime
#fixedCostOperation = costTruckRent + costWorkingTime
#variableCostOperation = float(costPersonellTravelTime + totalCostDistanceKMSludge + costTruckDriveTime)
#fixedCostOperationPe = fixedCostOperation / float(totalPE) # Per PE
#variableCostOperationPe = variableCostOperation / float(totalPE) # Per PE
#print(" ")
#print("COSTS")
#print(" Distance Cost: " + str(totalCostDistanceKMSludge))
#print(" costPersonellTravelTime Cost: " + str(costPersonellTravelTime))
#print(" costTruckRent Cost: " + str(costTruckRent))
totCostsPE = totCosts / float(totalPE) # Per PE
totTimePE = (totTravelTime + totWorkingTime) / totalPE
return totTimePE, totCostsPE
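# --- Illustrative sketch (not part of the original script) -----------------
# convertToCosts() sums personnel costs (driving + on-site time), vehicle
# rent (again driving + on-site time) and a per-km distance cost, and then
# normalises by the population equivalents. The numbers below are made up:
# 100 km driven, 2 h driving, 5 h on-site work, 50 per h labour, 1 per km,
# 30 per h truck rent and 2000 PE.
def _demo_convert_to_costs():
    time_pe, cost_pe = convertToCosts(
        totTravelDistance=100.0, totTravelTime=2.0, totWorkingTime=5.0,
        laborCostSludgeH=50.0, fixedCostKM=1.0, crentH=30.0, totalPE=2000)
    # total cost = 2*50 + 5*50 + 2*30 + 5*30 + 100*1 = 660 -> 0.33 per PE
    # total time = (2 + 5) / 2000 = 0.0035 h per PE
    return time_pe, cost_pe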
def convertToCostsserviceTour(totTravelDistance, totTravelTime, totWorkingTime, cp, costPerKM, truckRentPerh, totalPE):
''' Convert to Costs
Input:
totTravelDistance - Total travel distance
totTravelTime - Total travel time
totWorkingTime - Total working time
cp - Labor costs
costPerKM - costs per km
truckRentPerh - truck rent per h
totalPE - total PE
Output:
totTimePE - Total Time per PE
totCostPerPE - Total Costs per PE
'''
# Cost Personnel
costTravelTime = totTravelTime * cp # Cost of driving time for personnel
costWorkingTime = totWorkingTime * cp # Cost of of working time personnel
# Cost Vehicle
costFixedKM = totTravelDistance * costPerKM # Travel Cost (distance method)
costTruckRent = totWorkingTime * truckRentPerh # Fixed Cost Truck Renting
costTruckDriveTime = totTravelTime * truckRentPerh # Variable Costs Truck Renting
# Sum Costs
totCosts = costTravelTime + costWorkingTime + costFixedKM + costTruckRent + costTruckDriveTime # Method fixed cost per km
# Summen Costs
totCostPerPE = totCosts / float(totalPE)
# Calculate Parameters
totTimePE = (totTravelTime + totWorkingTime) / totalPE
return totTimePE, totCostPerPE
def writeTotxtSensitivity(outListStep_point, name, inList):
"""
This functions writes to a .txt file.
Input Arguments:
outListStep_point -- Path to folder where .txt file is stored
name -- Name of .txt file
inList -- List with entries to write to .txt file
"""
outfile = outListStep_point + name + ".txt"
myDocument = open(outfile, 'w')
print(myDocument)
myDocument.write(str(inList))
myDocument.close()
return
def getRadiusCosts(liste, iterations):
''' This function calculates the average costs per radius of the Scheduled Evacuation
Input:
liste - Results
iterations - Number of iterations
Output:
avRadiusCosts - Average costs per radius
'''
avRadiusCosts = []
# Operation Cost Smart Route
pos, a,b,c = 1, 0,0,0
std_a, std_b, std_c = [], [], []
for i in liste:
a += i[4]
b += i[5]
c += i[6]
std_a.append(i[4])
std_b.append(i[5])
std_c.append(i[6])
if pos == iterations:
pos = 0
# Calculate standard deviations
SD_a = numpy.std(std_a)
SD_b = numpy.std(std_b)
SD_c = numpy.std(std_c)
# Radius, Circle Area, Density per km2, PE per km2, total cost, time, averagecost per m3
z = [
i[0],
i[1],
i[2],
i[3],
a/float(iterations),
b/float(iterations),
c/float(iterations),
SD_a,
SD_b,
SD_c
]
avRadiusCosts.append(z)
a,b,c = 0,0,0
std_a, std_b, std_c = [], [], []
pos += 1
return avRadiusCosts
def summenDayCostsOfYear(costYearCall, dailyCostListCallSludge):
''' Sum the costs of all days of one year and empty the list
Input:
costYearCall - List to store yearly average results
dailyCostListCallSludge - List with costs of each day of one year
Output:
costYearCall - Update liste with another result of one year
dailyCostListCallSludge - Empty list of one year
'''
sumTotCosts, sumTotTime = 0, 0
for i in dailyCostListCallSludge:
sumTotCosts += i[1] # Cost
sumTotTime += i[2] # Time
z = (sumTotCosts, sumTotTime) #, sumTotCostsVariable)
costYearCall.append(z)
dailyCostListCallSludge = [] # Empty list to store daily results
return costYearCall, dailyCostListCallSludge
def readInstreetVertices(pathstreetVertices):
"""
This functions reads in a .txt file.
Input Arguments:
pathstreetVertices -- Path to .txt file
Output Arguments:
StreetVertices -- Street Vertices
"""
txtFileList = readLines(pathstreetVertices)
StreetVertices = []
for i in txtFileList:
lineElements = i.split()
StreetVertices.append([int(lineElements[0][1:-1]), float(lineElements[1][:-1]), float(lineElements[2][:-1]), float(lineElements[3][0:-1])])
return StreetVertices
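# --- Illustrative sketch (not part of the original script) -----------------
# readInstreetVertices() expects every line of the .txt file to be the str()
# of a Python list [ID, X, Y, Z], e.g. "[12, 684000.0, 247500.0, 455.0]".
# The round-trip below writes two such lines to a temporary file and reads
# them back; the file name and the coordinates are made up for the example.
import os
import tempfile

def _demo_street_vertices_roundtrip():
    vertices = [[1, 684000.0, 247500.0, 455.0],
                [2, 684100.0, 247600.0, 460.0]]
    path = os.path.join(tempfile.mkdtemp(), "verts.txt")
    with open(path, 'w') as f:
        for v in vertices:
            f.write(str(v) + "\n")
    return readInstreetVertices(path)
# _demo_street_vertices_roundtrip() returns the same [ID, X, Y, Z] lists.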
def readInDictionary(pathInFile):
"""
This functions reads a .txt file into a dictionary.
Input Arguments:
pathInFile -- Path to .txt file
Output Arguments:
outDictionary -- Dictionary
"""
txtFileList = readLines(pathInFile)
outDictionary = {} # empty Dictionary
for i in txtFileList:
spl = i.split(None, 1) # get index
index = int(spl[0])
entries = spl[1].split(",",)
subDict = {}
# First Entry
firstEntry = entries[0].split()
subDict[int(firstEntry[0][1:-1])] = float(firstEntry[1][:-1])
# entries in between
if len(entries) >= 2:
for entry in entries[1:-1]:
splitEntry = entry.split(None,)
subDict[int(splitEntry[0][:-1])] = float(splitEntry[1])
# Last Entry
lastEntry = entries[-1].split()
subDict[int(lastEntry[0][:-1])] = float(lastEntry[1][:-1])
else:
lastEntry = entries[-1].split()
subDict[int(lastEntry[0][1:-1])] = float(lastEntry[1][:-1])
outDictionary[index] = subDict
return outDictionary
def readLines(pathInFile):
"""
This functions reads out lines of a .txt file
Input Arguments:
pathInFile -- Path to .txt file
Output Arguments:
readLines -- Statistics
"""
inputfile = open(pathInFile, 'r') # Set Path to existing .txt-File with R results
lineArray = inputfile.readlines() # Read in single result lines from txtfile into List
readLines = [] # Create empty list to fill in whole txt File with results
position = 0
while position < len(lineArray): # Iterate through list with line-results by position
entry = lineArray[position] # Get line at position
readLines.append(entry) # Append line at position to empty List
position += 1 # For the loop
inputfile.close() # Close result .txt file
return readLines
def removePnt(PntsCopy, ID):
'''remove from list'''
removedPntList = []
for e in PntsCopy:
if e[0] == ID:
_ = 0
else:
removedPntList.append(e)
return removedPntList
def averageCostPerR(serviceTour, sheduledEvacuation, unsheduledEvacuation, repairTour, resultsService, resultsScheduledEvacuation, resultsUnscheduledEvacuation, resultsRepair, iterations):
'''
This function calculates the average for all iterations.
Input:
serviceTour - Service criteria
sheduledEvacuation - Scheduled Evacuation criteria
unsheduledEvacuation - Unscheduled Evacuation criteria
repairTour - Repair Tour
resultsService - Results service
resultsScheduledEvacuation - Results Scheduled Evacuation
resultsUnscheduledEvacuation - Results unscheduled Evacuation
resultsRepair - Results repair
iterations - Numer of iterations
Output:
averaged costs
'''
avRadiusCostserviceTour, avRadiusCostSheduledEvacuation, avRadiusCostUnsheduledEvacuation, avRadiusCostRepair = [], [], [], []
if serviceTour == True:
#print("Result: Service Tour")
#avRadiusCostserviceTour = getRadiusCostsserviceTour(resultsService, iterations)
avRadiusCostserviceTour = getRadiusCosts(resultsService, iterations)
if sheduledEvacuation == True:
#print("Result: Sheduled Evacuation")
#avRadiusCostSheduledEvacuation = getRadiusCostsSRsludge(resultsScheduledEvacuation, iterations)
avRadiusCostSheduledEvacuation = getRadiusCosts(resultsScheduledEvacuation, iterations)
if unsheduledEvacuation == True:
#print("RESULT: Unsheduled Evacuation")
#avRadiusCostUnsheduledEvacuation = getRadiusCostCallSludge(resultsUnscheduledEvacuation, iterations)
avRadiusCostUnsheduledEvacuation = getRadiusCosts(resultsUnscheduledEvacuation, iterations)
if repairTour == True:
#print("Result:Repair")
#avRadiusCostRepair = getRadiusCostCallSludge(resultsRepair, iterations)
avRadiusCostRepair = getRadiusCosts(resultsRepair, iterations)
# Create a list with summed costs to calculate standard devition of total costs...
return avRadiusCostserviceTour, avRadiusCostSheduledEvacuation, avRadiusCostUnsheduledEvacuation, avRadiusCostRepair
def summenAllCostsScatter(fullCostSumming, IterationsToAchieve, sheduledEvacuation, unsheduledEvacuation, serviceTour, repairTour, listWithIterResultSheduledEvacuation, listWithIterResultsUnsheduled, listWithIterResultServiceTour, listWithIterResultsRepair, totalPE, totSludgeScumVolume):
if fullCostSumming == True:
fullCostList = []
if sheduledEvacuation == True and unsheduledEvacuation == True:
print("ERROR: DECIDE WHICH EVACUATION MODE TO SUM")
raise ValueError("Scheduled and unscheduled evacuation cannot both be summed")
# Add sheduled evacuation
if sheduledEvacuation == True and unsheduledEvacuation == False:
print("sheduledEvacuation")
if len(fullCostList) == 0:
for i in listWithIterResultSheduledEvacuation:
fullCostList.append(i) # Add Smart Route Costs
else:
pos = 0
for i in listWithIterResultSheduledEvacuation:
fullCostList[pos][4] += i[4] # Add Sheduled Cost
fullCostList[pos][5] += i[5] # Add Time
fullCostList[pos][6] += i[6] # scrap
pos += 1
fullCostList_A = copy.deepcopy(fullCostList)
# Add unsheduled evacuation
if unsheduledEvacuation == True and sheduledEvacuation == False:
print("unsheduledEvacuation")
if len(fullCostList_A) == 0:
for i in listWithIterResultsUnsheduled:
fullCostList_A.append(i) # Add Smart Route Costs
else:
pos = 0
for i in listWithIterResultsUnsheduled:
fullCostList_A[pos][4] += i[4] # Add Unsheduld COst
fullCostList_A[pos][5] += i[5] # Add unsheduled time
fullCostList_A[pos][6] += i[6] # scrap
pos += 1
fullCostList_B = copy.deepcopy(fullCostList_A)
# Add Service
if serviceTour == True:
if len(fullCostList_B) == 0:
for i in listWithIterResultServiceTour:
fullCostList_B.append(i) # Add Smart Route Costs
else:
pos = 0
for i in listWithIterResultServiceTour:
fullCostList_B[pos][4] += i[4] # Add service cost
fullCostList_B[pos][5] += i[5] # Add service time
fullCostList_B[pos][6] += i[6] # scrap
pos += 1
# Update if new one
fullCostList_C = copy.deepcopy(fullCostList_B)
# Add Repair
if repairTour == True:
if len(fullCostList_C) == 0:
for i in listWithIterResultsRepair:
fullCostList_C.append(i) # Add Smart Route Costs
else:
pos = 0
for i in listWithIterResultsRepair:
fullCostList_C[pos][4] += i[4] # Add Smart Route Costs
fullCostList_C[pos][5] += i[5] # Add Smart Route Costs
fullCostList_C[pos][6] += i[6] # Add Smart Route Costs
pos += 1
# Update if new one
fullCostList_Final_scatter = copy.deepcopy(fullCostList_C)
# -------------------
# Add Costs per m3
# -------------------
for i in fullCostList_Final_scatter:
averageCostperQubikSludge = (i[4] * totalPE) / totSludgeScumVolume # Average cost per m3: (cost per PE * total PE) / total sludge volume
i[6] = averageCostperQubikSludge
# -------------------------
# Calculate Measure for Paper
# -------------------------
# Calculate the standard deviation of the parameters for the paper
measures = []
a,b,c,std_a, std_b, std_c = 0, 0, 0, [], [], []
pos = 1
for i in fullCostList_Final_scatter:
#print("i: " + str(i))
a += i[4]
b += i[5]
c += i[6]
std_a.append(i[4]) # Cost
std_b.append(i[5]) # Time
std_c.append(i[6]) # Cost per m3
if pos == IterationsToAchieve:
#print("ois;" + str(pos))
pos = 0
SD_a = numpy.std(std_a) # Calculate standard deviations of total costs
SD_b = numpy.std(std_b) # Calculate standard deviations of used time
SD_c = numpy.std(std_c) # Calculate standard deviations of costs per m3
z = [
i[0],
i[1],
i[2],
i[3],
a/IterationsToAchieve, # Cost
b/IterationsToAchieve, # Time
c/IterationsToAchieve, # Cost per m3
SD_a, # Standard deviation of total costs
SD_b, # Standard deviation of used time
SD_c # Standard deviation of cost per m3
]
measures.append(z)
a,b,c = 0,0,0
std_a, std_b, std_c = [], [], []
pos += 1
return fullCostList_Final_scatter, measures
def summenTotalCosts(fullCostSumming, sheduledEvacuation, unsheduledEvacuation, serviceTour, repairTour, avRadiusCostSheduledEvacuation, avRadiusCostUnsheduledEvacuation, avRadiusCostserviceTour, avRadiusCostRepair):
''' Sum the costs of the different tasks for each radius (summing averages, therefore) '''
if fullCostSumming == True:
fullCostList = [] # List with final results
if sheduledEvacuation == True and unsheduledEvacuation == True:
print("ERROR: DECIDE WHICH EVACUATION MODE TO SUM")
raise ValueError("Scheduled and unscheduled evacuation cannot both be summed")
# Add sludge costs smart route
if sheduledEvacuation == True and unsheduledEvacuation == False:
#print("SUMM COSTS: -- Sheduled Costs adding: " + str(avRadiusCostSheduledEvacuation))
#print(" ")
if len(fullCostList) == 0:
for i in avRadiusCostSheduledEvacuation:
fullCostList.append(i) # Add Smart Route Costs
else:
pos = 0
for i in avRadiusCostSheduledEvacuation:
fullCostList[pos][4] += i[4] # Add Smart Route Costs
fullCostList[pos][5] += i[5] # Add Smart Route Costs
fullCostList[pos][6] += i[6] # Add Smart Route Costs
pos += 1
fullCostList_A = copy.deepcopy(fullCostList)
# Add sludge costs sludge bedarfsentleerung
if unsheduledEvacuation == True and sheduledEvacuation == False:
#print("SUMM COSTS: -- Unsheduled Costs adding: " + str(avRadiusCostUnsheduledEvacuation))
#print(" ")
if len(fullCostList_A) == 0:
for i in avRadiusCostUnsheduledEvacuation:
fullCostList_A.append(i) # Add Smart Route Costs
else:
pos = 0
for i in avRadiusCostUnsheduledEvacuation:
fullCostList_A[pos][4] += i[4] # Add Smart Route Costs
fullCostList_A[pos][5] += i[5] # Add Smart Route Costs
fullCostList_A[pos][6] += i[6] # Add Smart Route Costs
pos += 1
fullCostList_B = copy.deepcopy(fullCostList_A)
if serviceTour == True:
#print("SUMM COSTS: -- Service Costs adding: " + str(avRadiusCostserviceTour))
#print(" ")
if len(fullCostList_B) == 0:
for i in avRadiusCostserviceTour:
fullCostList_B.append(i) # Add Smart Route Costs
else:
pos = 0
for i in avRadiusCostserviceTour:
fullCostList_B[pos][4] += i[4] # Add Smart Route Costs
fullCostList_B[pos][5] += i[5] # Add Smart Route Costs
fullCostList_B[pos][6] += i[6] # Add Smart Route Costs
pos += 1
# Update if new one
fullCostList_C = copy.deepcopy(fullCostList_B)
if repairTour == True:
#print("SUMM COSTS: -- Repair Costs adding: " + str(avRadiusCostRepair))
#print(" ")
if len(fullCostList_C) == 0:
for i in avRadiusCostRepair:
fullCostList_C.append(i) # Add Smart Route Costs
else:
pos = 0
for i in avRadiusCostRepair:
fullCostList_C[pos][4] += i[4] # Add Smart Route Costs
fullCostList_C[pos][5] += i[5] # Add Smart Route Costs
fullCostList_C[pos][6] += i[6] # Add Smart Route Costs
pos += 1
# Update if new one
fullCostList_Final = copy.deepcopy(fullCostList_C)
return fullCostList_Final
# Calibration
def dijkstra(streetNetwork, idp0, idp1):
"""
This function gets the path from the Dijkstra list.
Input Arguments:
streetNetwork -- Distances to all nodes
idp0 -- Start node
idp1 -- End node
Output Arguments:
archPathList -- Path with the intermediate nodes and their distances.
distStartEnd -- Distance between the two nodes.
"""
distances, listDijkstra = dijkstraAlgorithm(streetNetwork, idp0) # calculate Dijkstra distances
scrapPathDjika, distStartEnd = writePath(listDijkstra, distances, idp0, idp1)
archPathList = archPath(scrapPathDjika, distances) # archPathList contains all intermediate, non-populated nodes of the street network. The list gets appended to P afterwards
return archPathList, distStartEnd
def relax(StreetNetwork, u, v, D, P):
"""
Relaxing function of the Dijkstra algorithm
Input Arguments:
StreetNetwork -- StreetNetwork
u,v,D -- Dijkstra-related variables
P -- Dijkstra list with the predecessor of each node
"""
inf = float('inf')
d = D.get(u, inf) + StreetNetwork[u][v] # Possible shortcut estimate
if d < D.get(v, inf): # Is it really a shortcut?
D[v], P[v] = d, u # Update estimate and p
def dijkstraAlgorithm(StreetNetwork, s):
"""
Dijkstra algorithm to find the shortest route on the street network.
Input Arguments:
StreetNetwork -- StreetNetwork
s -- Starting node
Output Arguments:
D -- Distances to every node from s
P -- Dijkstra predecessor list
"""
D, P, Q, S = {s:0}, {}, [(0, s)], set()
while Q: # All nodes are checked?
smallesvalue, cnt = 99999999, 0
for i in Q:
if i[0] < smallesvalue:
smallesvalue, u = i[0], i[1]
smallesPosition = cnt
cnt += 1
del Q[smallesPosition]
if u in S: continue # Already visited? Skip it
S.add(u) # We've visited it now
for v in StreetNetwork[u]: # Go through all its neighbors
relax(StreetNetwork, u, v, D, P) # Relax the out-edges
Q.append((D[v], v)) # visited
return D, P
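# --- Illustrative sketch (not part of the original script) -----------------
# dijkstraAlgorithm() expects the street network as a dict of dicts,
# {node: {neighbour: edge_length, ...}, ...}. The tiny made-up network below
# shows the shape of the returned distance dict D and predecessor dict P.
def _demo_dijkstra():
    street_network = {
        0: {1: 1.0, 2: 4.0},
        1: {0: 1.0, 2: 2.0},
        2: {0: 4.0, 1: 2.0},
    }
    D, P = dijkstraAlgorithm(street_network, 0)
    # D == {0: 0, 1: 1.0, 2: 3.0} and P == {1: 0, 2: 1}
    return D, P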
def calculateDistanceFactor(WWTPsmartRoute, streetVertices, streetNetwork, Initialdepot):
'''
Compare the distance along the street network with the direct (as the crow flies) distance
'''
factorCrawDirect = []
cnt = 0
InitialdepotiWithID = [0, Initialdepot[0], Initialdepot[1]]
WWTPsmartRoute.insert(0, InitialdepotiWithID) # start from depot
WWTPsmartRoute.append(InitialdepotiWithID) # return to depot
# Initial find depot
closestDist = 999999999999
for i in streetVertices:
dist = distanceCalc2d([Initialdepot[0], Initialdepot[1]], [i[1], i[2]])
if dist < closestDist:
closestTo = i[0]
closestX = i[1]
closestY = i[2]
closestDist = dist
fromNode = closestTo
fromX = closestX
fromY = closestY
for edge in WWTPsmartRoute:
closestDist = 999999999999
for i in streetVertices:
dist = distanceCalc2d([edge[1], edge[2]], [i[1], i[2]])
if dist < closestDist:
closestTo = i[0]
closestX = i[1]
closestY = i[2]
closestDist = dist
toNode = closestTo
try:
_, streetDistanceInM = dijkstra(streetNetwork, fromNode, closestTo) # Djikstra
streetDistanceInKM = streetDistanceInM / 1000
crawDistanceinM = distanceCalc2d([fromX, fromY], [closestX, closestY])
crawDistanceinKM = crawDistanceinM / 1000
if crawDistanceinM != 0:
f_dist = float(streetDistanceInKM) / float(crawDistanceinKM)
factorCrawDirect.append(f_dist)
except KeyError:
_ = 0
fromNode = closestTo
fromX = closestX
fromY = closestY
cnt += 1
# Delete first and last
del WWTPsmartRoute[0]
del WWTPsmartRoute[len(WWTPsmartRoute)-1]
# calc average
summe = 0
for i in factorCrawDirect:
summe += i
if summe > 0:
averageDistanceFactor = summe / len(factorCrawDirect)
else:
averageDistanceFactor = 0
return averageDistanceFactor
def distanceCalc2d(p0, p1):
"""
This function calculates the Euclidean distance in 2D space.
Input Arguments:
p0, p1 -- Points with coordinates. Form: (X, Y)
Output Arguments:
distance -- Distance between the points, not taking into account the height difference.
"""
distance = math.hypot(p0[0] - p1[0], p0[1] - p1[1])
return distance
def writePath(dijkstraList, distancesDijka, start, end):
"""
This function writes out the direct path from list generated with djikstra algorithm.
The start node needs to be the same one from which the Dijkstra list was calculated.
Input Arguments:
dijkstraList -- All edges
distancesDijka -- List with dijkstra distances
start -- start node
end -- end node
Output Arguments:
path -- Path
distStartEnd -- Total distance from start to end
"""
path, up, distStartEnd = [end], None, distancesDijka[end]
while up != start and start != end: # As long as the starting node is not found
up = dijkstraList[end]
path.append(up)
end = up
return path, distStartEnd
def archPath(dijkstra, distances):
"""
This function writes out the edges from the Dijkstra algorithm of the new path to be added (archPath).
Input Arguments:
dijkstra -- Path from Dijkstra
distances -- Dijkstra distances
Output Arguments:
archPoints -- List with path
"""
archPoints, cnter = [], -1
dijkstra = dijkstra[::-1] # Invert pathOrigin to get correct flow to origin
# ArchPath: Calculate Distances on the path and the whole path to archPoints
for i in dijkstra:
cnter += 1
if cnter > 0:
nextElement = i
interDist = distances[nextElement] - distances[oldElement]
archPoints.append((oldElement, (nextElement, interDist))) # Form: [(from, (to, dist)), (....)]
oldElement = i
return archPoints
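# --- Illustrative sketch (not part of the original script) -----------------
# Continuing the tiny made-up network from _demo_dijkstra() above: writePath()
# walks the predecessor dict back from the end node, and archPath() converts
# that path into (from, (to, edge_distance)) pairs.
def _demo_path_helpers():
    D, P = _demo_dijkstra()
    path, dist = writePath(P, D, 0, 2)   # path == [2, 1, 0], dist == 3.0
    arcs = archPath(path, D)             # [(0, (1, 1.0)), (1, (2, 2.0))]
    return path, dist, arcs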
| lgpl-3.0 |
turbinesFoam/turbinesFoam | tutorials/actuatorLine/pitching/plot.py | 1 | 1418 | #!/usr/bin/env python
"""Visualization for OpenFOAM actuatorLine simulation."""
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def loadperf():
df = pd.read_csv("postProcessing/actuatorLines/0/foil.csv")
df = df.drop_duplicates("time", keep="last")
df["alpha_rad"] = df.alpha_deg/180.0*np.pi
df["cn"] = df.cl*np.cos(df.alpha_rad) - df.cd*np.sin(df.alpha_rad)
df["cc"] = df.cl*np.sin(df.alpha_rad) - df.cd*np.cos(df.alpha_rad)
return df
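# --- Illustrative sketch (not part of the original tutorial script) --------
# loadperf() projects the lift and drag coefficients onto what appear to be
# the normal (C_N) and chordwise (C_C) directions. The helper below evaluates
# the same expressions for a single hypothetical (cl, cd, alpha) so the
# projection can be sanity-checked without an OpenFOAM run.
def project_coeffs(cl, cd, alpha_deg):
    alpha = np.deg2rad(alpha_deg)
    cn = cl*np.cos(alpha) - cd*np.sin(alpha)
    cc = cl*np.sin(alpha) - cd*np.cos(alpha)
    return cn, cc
# Example: project_coeffs(1.0, 0.05, 10.0) -> (~0.976, ~0.124)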
def plot_alpha():
df = loadperf()
plt.figure()
plt.plot(df.time, df.alpha_geom_deg, label="Geometric")
plt.plot(df.time, df.alpha_deg, label="Actual")
plt.xlabel("Time (s)")
plt.ylabel("Angle of attack (deg)")
plt.legend(loc="best")
plt.tight_layout()
def plot_cn(t0=0.5):
df = loadperf()
plt.figure()
ind = df.time >= t0
plt.plot(df.alpha_geom_deg[ind], df.cn[ind])
plt.xlabel(r"$\alpha$ (geometric, degrees)")
plt.ylabel(r"$C_N$")
plt.xlim((0, None))
plt.ylim((0, None))
plt.tight_layout()
def plot_cc(t0=0.5):
df = loadperf()
plt.figure()
ind = df.time >= t0
plt.plot(df.alpha_geom_deg[ind], df.cc[ind])
plt.xlabel(r"$\alpha$ (geometric, degrees)")
plt.ylabel(r"$C_C$")
plt.tight_layout()
if __name__ == "__main__":
plot_alpha()
plot_cn()
plot_cc()
plt.show()
| gpl-3.0 |
sergiopasra/numina | numina/array/display/ximshow.py | 3 | 34038 | #
# Copyright 2015-2021 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
import argparse
from astropy.io import fits
import numpy as np
import re
from .matplotlib_qt import set_window_geometry
from .pause_debugplot import pause_debugplot
from .fileinfo import list_fileinfo_from_txt
from .fileinfo import check_extnum
from .overplot_ds9reg import overplot_ds9reg
from ..stats import summary
from numina.visualization import ZScaleInterval
GLOBAL_ASPECT = 'auto'
GLOBAL_GEOMETRY = '0,0,800,600'
dum_str = '' # global variable in function keypress
dum_par = '' # global variable in function keypress
def check_wavelength_scale(crval1, cdelt1, ctype1, cunit1):
"""Check for wavelength calibration in the X axis.
Parameters
----------
crval1 : float or None
CRVAL1 parameter corresponding to wavelength calibration in
the X direction.
cdelt1 : float or None
CDELT1 parameter corresponding to wavelength calibration in
the X direction.
ctype1 : str or None
CTYPE1 parameter corresponding to wavelength calibration in
the X direction.
cunit1 : str or None
CUNIT1 parameter corresponding to wavelength calibration in
the X direction.
Returns
-------
result : bool
True in the wavelength calibration has been set.
False otherwise.
"""
result = False
if ctype1 is None and cunit1 is None:
return result
if ctype1 is not None:
if 'wavelength' in ctype1.lower():
result = True
if cunit1 is not None:
if 'angstrom' in cunit1.lower():
result = True
if result:
if crval1 is not None and cdelt1 is not None:
pass
else:
result = False
return result
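# --- Illustrative sketch (not part of the original module) -----------------
# Minimal examples of how check_wavelength_scale() reacts to typical header
# combinations; all keyword values below are made up.
def _demo_check_wavelength_scale():
    calibrated = check_wavelength_scale(crval1=7000.0, cdelt1=2.0,
                                        ctype1='WAVELENGTH', cunit1='Angstrom')
    incomplete = check_wavelength_scale(crval1=None, cdelt1=None,
                                        ctype1='WAVELENGTH', cunit1=None)
    no_keywords = check_wavelength_scale(crval1=7000.0, cdelt1=2.0,
                                         ctype1=None, cunit1=None)
    return calibrated, incomplete, no_keywords   # (True, False, False)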
def ximshow_jupyter(image2d, **args):
"""Auxiliary function to call ximshow from a jupyter notebook.
"""
return ximshow(image2d, using_jupyter=True, **args)
def ximshow(image2d, title=None, show=True,
cbar_label=None, cbar_orientation=None,
z1z2=None, cmap="hot",
image_bbox=None, first_pixel=(1, 1),
aspect=GLOBAL_ASPECT,
crpix1=None, crval1=None, cdelt1=None, ctype1=None, cunit1=None,
ds9regfile=None,
geometry=GLOBAL_GEOMETRY, figuredict=None,
tight_layout=True,
debugplot=0, using_jupyter=False):
"""Auxiliary function to display a numpy 2d array.
Parameters
----------
image2d : 2d numpy array, float
2d image to be displayed.
title : string
Plot title.
cbar_label : string
Color bar label.
cbar_orientation : string or None
Color bar orientation: valid options are 'horizontal' or
'vertical' (or None for no color bar).
show : bool
If True, the function shows the displayed image. Otherwise
the function just invoke the plt.imshow() function and
plt.show() is expected to be executed outside.
z1z2 : tuple of floats, string or None
Background and foreground values. If None, zcuts are employed.
cmap : string
Color map to be employed.
image_bbox : tuple (4 integers)
Image rectangle to be displayed, with indices given by
(nc1,nc2,ns1,ns2), which correspond to the numpy array:
image2d[(ns1-1):ns2,(nc1-1):nc2].
first_pixel : tuple (2 integers)
(x0,y0) coordinates of pixel at origin.
aspect : str
Control the aspect ratio of the axes. Valid values are 'equal'
and 'auto'.
crpix1 : float or None
CRPIX1 parameter corresponding to wavelength calibration in
the X direction.
crval1 : float or None
CRVAL1 parameter corresponding to wavelength calibration in
the X direction.
cdelt1 : float or None
CDELT1 parameter corresponding to wavelength calibration in
the X direction.
ctype1 : str or None
CTYPE1 parameter corresponding to wavelength calibration in
the X direction.
cunit1 : str or None
CUNIT1 parameter corresponding to wavelength calibration in
the X direction.
ds9regfile : file handler
Ds9 region file to be overplotted.
geometry : str or None
x, y, dx, dy values employed to set the window geometry.
tight_layout : bool
If True, and show=True, a tight display layout is set.
figuredict: dictionary
Parameters for ptl.figure(). Useful for pdf output.
For example: --figuredict "{'figsize': (8, 10), 'dpi': 100}"
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot
using_jupyter : bool
If True, this function is called from a jupyter notebook.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False.
"""
from numina.array.display.matplotlib_qt import plt
if not show and using_jupyter:
plt.ioff()
# protections
if not isinstance(image2d, np.ndarray):
raise ValueError("image2d=" + str(image2d) +
" must be a numpy.ndarray")
elif image2d.ndim != 2:
raise ValueError("image2d.ndim=" + str(image2d.ndim) +
" must be 2")
naxis2_, naxis1_ = image2d.shape
# check if wavelength calibration is provided
wavecalib = check_wavelength_scale(
crval1=crval1, cdelt1=cdelt1, ctype1=ctype1, cunit1=cunit1
)
# read bounding box limits
if image_bbox is None:
nc1 = 1
nc2 = naxis1_
ns1 = 1
ns2 = naxis2_
else:
nc1, nc2, ns1, ns2 = image_bbox
if 1 <= nc1 <= nc2 <= naxis1_:
pass
else:
raise ValueError("Invalid bounding box limits")
if 1 <= ns1 <= ns2 <= naxis2_:
pass
else:
raise ValueError("Invalid bounding box limits")
def get_current_zoom(ax_image, debug=False):
"""Return subimage corresponding to current zoom.
Parameters
----------
ax_image : axes
Image axes.
debug : bool
If True, the image corners are printed.
Returns
-------
subimage : numpy array (floats)
Subimage.
"""
xmin_image, xmax_image = ax_image.get_xlim()
ymin_image, ymax_image = ax_image.get_ylim()
ixmin = int(xmin_image + 0.5)
ixmax = int(xmax_image + 0.5)
iymin = int(ymin_image + 0.5)
iymax = int(ymax_image + 0.5)
ixmin -= first_pixel[0] - 1
ixmax -= first_pixel[0] - 1
iymin -= first_pixel[1] - 1
iymax -= first_pixel[1] - 1
if ixmin < nc1:
ixmin = nc1
if ixmin > nc2:
ixmin = nc2
if ixmax < nc1:
ixmax = nc1
if ixmax > nc2:
ixmax = nc2
if iymin < ns1:
iymin = ns1
if iymin > ns2:
iymin = ns2
if iymax < ns1:
iymax = ns1
if iymax > ns2:
iymax = ns2
if debug:
print("\n>>> xmin, xmax, ymin, ymax [pixels; origin (1,1)]:",
ixmin, ixmax, iymin, iymax)
return image2d[(iymin-1):iymax, (ixmin-1):ixmax]
def keypress(event):
"""Deal with keyboard events, allowing the update of vmin and vmax.
Note that a call to raw_input() is not allowed within this
function since, in that case, the following runtime error
is raised: can't re-enter readline
For that reason, the new vmin and vmax values should be
entered blindly.
To avoid collisions with navigation keyboard shortcuts,
check the table available at:
http://matplotlib.org/users/navigation_toolbar.html
"""
global dum_str
global dum_par
if event.key == "?":
print("""
Keyword events
==============
Home/Reset......................: h or r or home
Back............................: c or left arrow or backspace
Forward.........................: v or right arrow
Pan/Zoom........................: p
Zoom-to-rect....................: o
Save............................: ctrl + s
Toggle fullscreen...............: ctrl + f
Close plot......................: ctrl + w
Set zscale......................: /
Set bg=min and fg=max values....: ,
Display statistical summary.....: ;
Set foreground by keyboard......: m
Set background by keyboard......: n
Activate/deactivate ds9 regions.: a
Change aspect ratio.............: =
Constrain pan/zoom to x axis....: hold x when panning/zooming with mouse
Constrain pan/zoom to y axis....: hold y when panning/zooming with mouse
Preserve aspect ratio...........: hold CONTROL when panning/zooming with mouse
Toggle grid.....................: g when mouse is over an axes
Toggle x axis scale (log/linear): L or k when mouse is over an axes
Toggle y axis scale (log/linear): l when mouse is over an axes
""")
elif event.key == "/":
subimage2d = get_current_zoom(ax, debug=True)
new_vmin, new_vmax = ZScaleInterval().get_limits(subimage2d)
print(">>> setting cuts to vmin=" + str(new_vmin) +
" and vmax=" + str(new_vmax))
im_show.set_clim(vmin=new_vmin)
im_show.set_clim(vmax=new_vmax)
dum_str = ''
dum_par = ''
plt.show(block=False)
plt.pause(0.001)
elif event.key == ",":
subimage2d = get_current_zoom(ax, debug=True)
new_vmin = subimage2d.min()
new_vmax = subimage2d.max()
print(">>> setting cuts to vmin=" + str(new_vmin) +
" and vmax=" + str(new_vmax))
im_show.set_clim(vmin=new_vmin)
im_show.set_clim(vmax=new_vmax)
dum_str = ''
dum_par = ''
plt.show(block=False)
plt.pause(0.001)
elif event.key == ";":
subimage2d = get_current_zoom(ax, debug=True)
summary(subimage2d.flatten(), debug=True)
elif event.key == "n":
print("Type (blindly!) vmin <return>")
dum_str = ''
dum_par = "vmin"
elif event.key == "m":
print("Type (blindly!) vmax <return>")
dum_str = ''
dum_par = "vmax"
elif event.key == "=":
if ax.get_aspect() == 'equal':
ax.set_aspect('auto')
else:
ax.set_aspect('equal')
plt.tight_layout()
plt.show(block=False)
plt.pause(0.001)
elif event.key == "enter":
if dum_par == "vmin":
try:
new_vmin = float(dum_str)
except ValueError:
print("Invalid vmin=" + dum_str)
dum_str = ''
print("Type again (blindly!) vmin <return>")
else:
print("Setting vmin=" + dum_str)
im_show.set_clim(vmin=new_vmin)
dum_str = ''
dum_par = ''
plt.show(block=False)
plt.pause(0.001)
elif dum_par == "vmax":
try:
new_vmax = float(dum_str)
except ValueError:
print("Invalid vmax=" + dum_str)
dum_str = ''
print("Type again (blindly!) vmax <return>")
else:
print("Setting vmax=" + dum_str)
im_show.set_clim(vmax=new_vmax)
dum_str = ''
dum_par = ''
plt.show(block=False)
plt.pause(0.001)
else:
if dum_str == '':
dum_str = event.key
else:
dum_str += event.key
# plot limits
xmin = float(nc1) - 0.5 + (first_pixel[0] - 1)
xmax = float(nc2) + 0.5 + (first_pixel[0] - 1)
ymin = float(ns1) - 0.5 + (first_pixel[1] - 1)
ymax = float(ns2) + 0.5 + (first_pixel[1] - 1)
# display image
if figuredict is None:
fig = plt.figure()
else:
fig = plt.figure(**figuredict)
ax = fig.add_subplot(111)
ax.autoscale(False)
ax.set_xlabel('image pixel in the X direction')
ax.set_ylabel('image pixel in the Y direction')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.grid(False)
if z1z2 is None:
z1, z2 = ZScaleInterval().get_limits(
image2d[(ns1 - 1):ns2, (nc1 - 1):nc2]
)
elif z1z2 == "minmax":
z1 = image2d[(ns1 - 1):ns2, (nc1 - 1):nc2].min()
z2 = image2d[(ns1 - 1):ns2, (nc1 - 1):nc2].max()
else:
z1, z2 = z1z2
im_show = plt.imshow(image2d[(ns1 - 1):ns2, (nc1 - 1):nc2],
cmap=cmap, aspect=aspect,
vmin=z1, vmax=z2,
interpolation='nearest', origin='lower',
extent=[xmin, xmax, ymin, ymax])
if cbar_label is None:
cbar_label = "Number of counts"
if cbar_orientation in ["horizontal", "vertical"]:
plt.colorbar(im_show, shrink=1.0, label=cbar_label,
orientation=cbar_orientation)
if title is not None:
ax.set_title(title)
if ds9regfile is not None:
overplot_ds9reg(ds9regfile.name, ax)
# set the geometry
if geometry is not None:
tmp_str = geometry.split(",")
x_geom = int(tmp_str[0])
y_geom = int(tmp_str[1])
dx_geom = int(tmp_str[2])
dy_geom = int(tmp_str[3])
geometry_tuple = x_geom, y_geom, dx_geom, dy_geom
set_window_geometry(geometry_tuple)
# connect keypress event with function responsible for
# updating vmin and vmax
fig.canvas.mpl_connect('key_press_event', keypress)
# wavelength scale
if wavecalib:
if crpix1 is None:
crpix1 = 1.0
xminwv = crval1 + (xmin - crpix1) * cdelt1
xmaxwv = crval1 + (xmax - crpix1) * cdelt1
ax2 = ax.twiny()
ax2.grid(False)
ax2.set_xlim(xminwv, xmaxwv)
ax2.set_xlabel('Wavelength (Angstroms)')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# show plot or return axes
if show:
pause_debugplot(debugplot, pltshow=show, tight_layout=tight_layout)
else:
if tight_layout:
plt.tight_layout()
# return axes
if using_jupyter:
plt.ion()
return ax
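# --- Illustrative sketch (not part of the original module) -----------------
# Typical use of ximshow() with a synthetic image; the array contents and the
# cut values are made up. With show=False the Matplotlib axes are returned so
# further elements can be overplotted before calling plt.show().
def _demo_ximshow():
    image2d = np.random.normal(loc=1000.0, scale=50.0, size=(200, 300))
    ax = ximshow(image2d, title='synthetic frame', z1z2=(900.0, 1100.0),
                 cbar_orientation='vertical', show=False)
    ax.plot([50, 250], [100, 100], 'w--')   # example overplot
    return ax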
def ximshow_file(singlefile,
extnum=1,
args_cbar_label=None, args_cbar_orientation=None,
args_z1z2=None, args_bbox=None, args_firstpix=None,
args_aspect=GLOBAL_ASPECT,
args_keystitle=None, args_ds9reg=None,
args_geometry=GLOBAL_GEOMETRY, pdf=None,
args_figuredict=None,
show=True,
debugplot=None,
using_jupyter=False):
"""Function to execute ximshow() as called from command line.
Parameters
----------
singlefile : string
Name of the FITS file to be displayed.
extnum : int
Extension number: 1 for first extension (default).
args_cbar_label : string
Color bar label.
args_cbar_orientation : string or None
Color bar orientation: valid options are 'horizontal' or
'vertical' (or None for no color bar).
args_z1z2 : string or None
String providing the image cuts tuple: z1, z2, minmax of None
args_bbox : string or None
String providing the bounding box tuple: nc1, nc2, ns1, ns2
args_firstpix : string or None
String providing the coordinates of lower left pixel.
args_aspect : str
Control the aspect ratio of the axes. Valid values are 'equal'
and 'auto'.
args_keystitle : string or None
Tuple of FITS keywords.format: key1,key2,...,keyn.format
args_ds9reg : file handler
Ds9 region file to be overplotted.
args_geometry : string or None
x, y, dx, dy to define the window geometry. This
information is ignored if args_pdffile is not None.
pdf : PdfFile object or None
If not None, output is sent to PDF file.
args_figuredict : string containing a dictionary
Parameters for ptl.figure(). Useful for pdf output.
For example: --figuredict "{'figsize': (8, 10), 'dpi': 100}"
show : bool
If True, the function shows the displayed image. Otherwise
the function just invoke the plt.imshow() function and
plt.show() is expected to be executed outside.
debugplot : integer or None
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
using_jupyter : bool
If True, this function is called from a jupyter notebook.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False.
"""
# read z1, z2
if args_z1z2 is None:
z1z2 = None
elif args_z1z2 == "minmax":
z1z2 = "minmax"
else:
if args_z1z2[0] == '[':
tmp_str = args_z1z2[1:]
else:
tmp_str = args_z1z2
tmp_str = re.sub(']', '', tmp_str)
tmp_str = tmp_str.split(",")
z1z2 = float(tmp_str[0]), float(tmp_str[1])
# read input FITS file
hdulist = fits.open(singlefile)
if extnum is None or extnum < 1 or extnum > len(hdulist):
raise ValueError(f'Unexpected extension number {extnum}')
image_header = hdulist[extnum - 1].header
image2d = hdulist[extnum - 1].data
hdulist.close()
naxis1 = image_header['naxis1']
if 'naxis2' in image_header:
naxis2 = image_header['naxis2']
else:
naxis2 = 1
# read wavelength calibration
if 'crpix1' in image_header:
crpix1 = image_header['crpix1']
else:
crpix1 = None
if 'crval1' in image_header:
crval1 = image_header['crval1']
else:
crval1 = None
if 'cdelt1' in image_header:
cdelt1 = image_header['cdelt1']
else:
cdelt1 = None
if 'ctype1' in image_header:
ctype1 = image_header['ctype1']
else:
ctype1 = None
if 'cunit1' in image_header:
cunit1 = image_header['cunit1']
else:
cunit1 = None
# title for plot
title = singlefile
if args_keystitle is not None:
keystitle = args_keystitle
keysformat = ".".join(keystitle.split(".")[1:])
keysnames = keystitle.split(".")[0]
tuple_of_keyval = ()
for key in keysnames.split(","):
keyval = image_header[key]
tuple_of_keyval += (keyval,)
title += "\n" + str(keysformat % tuple_of_keyval)
if len(image2d.shape) == 1:
if image2d.shape != (naxis1,):
raise ValueError("Unexpected error with NAXIS1")
image2d = np.reshape(image2d, (1, naxis1))
elif len(image2d.shape) == 2:
if image2d.shape != (naxis2, naxis1):
raise ValueError("Unexpected error with NAXIS1, NAXIS2")
else:
raise ValueError("Unexpected number of dimensions > 2")
print('>>> File..:', singlefile)
print('>>> NAXIS1:', naxis1)
print('>>> NAXIS2:', naxis2)
# read bounding box
if args_bbox is None:
nc1 = 1
nc2 = naxis1
ns1 = 1
ns2 = naxis2
else:
tmp_bbox = args_bbox.split(",")
nc1 = int(tmp_bbox[0])
nc2 = int(tmp_bbox[1])
ns1 = int(tmp_bbox[2])
ns2 = int(tmp_bbox[3])
if nc1 < 1:
nc1 = 1
if nc2 > naxis1:
nc2 = naxis1
if ns1 < 1:
ns1 = 1
if ns2 > naxis2:
ns2 = naxis2
# read coordinates of lower left pixel
if args_firstpix is None:
nc0 = 1
ns0 = 1
else:
tmp_firstpix = args_firstpix.split(",")
nc0 = int(tmp_firstpix[0])
ns0 = int(tmp_firstpix[1])
# display image
if args_figuredict is None:
figuredict = None
else:
figuredict = eval(args_figuredict)
ax = ximshow(image2d=image2d, show=False,
cbar_label=args_cbar_label,
cbar_orientation=args_cbar_orientation,
title=title,
z1z2=z1z2,
image_bbox=(nc1, nc2, ns1, ns2),
first_pixel=(nc0, ns0),
aspect=args_aspect,
crpix1=crpix1,
crval1=crval1,
cdelt1=cdelt1,
ctype1=ctype1,
cunit1=cunit1,
ds9regfile=args_ds9reg,
geometry=args_geometry,
figuredict=figuredict,
debugplot=debugplot,
using_jupyter=using_jupyter)
if pdf is not None:
if show:
from numina.array.display.matplotlib_qt import plt
plt.tight_layout()
pdf.savefig()
else:
return ax
else:
if show:
pause_debugplot(debugplot, pltshow=True)
else:
# return axes
return ax
def jimshow(image2d,
ax=None,
title=None,
vmin=None, vmax=None,
image_bbox=None,
aspect=GLOBAL_ASPECT,
xlabel='image pixel in the X direction',
ylabel='image pixel in the Y direction',
crpix1=None, crval1=None, cdelt1=None, ctype1=None, cunit1=None,
grid=False,
cmap='hot',
cbar_label='Number of counts',
cbar_orientation='horizontal'):
"""Auxiliary function to display a numpy 2d array via axes object.
Parameters
----------
image2d : 2d numpy array, float
2d image to be displayed.
ax : axes object
Matplotlib axes instance. Note that this value is also
employed as output.
title : string
Plot title.
vmin : float, 'min', or None
Background value. If None, the minimum zcut is employed.
vmax : float, 'max', or None
Foreground value. If None, the maximum zcut is employed.
image_bbox : tuple (4 integers)
Image rectangle to be displayed, with indices given by
(nc1,nc2,ns1,ns2), which correspond to the numpy array:
image2d[(ns1-1):ns2,(nc1-1):nc2].
aspect : str
Control the aspect ratio of the axes. Valid values are 'equal'
and 'auto'.
xlabel : string
X-axis label.
ylabel : string
Y-axis label.
crpix1 : float or None
CRPIX1 parameter corresponding to wavelength calibration in
the X direction.
crval1 : float or None
CRVAL1 parameter corresponding to wavelength calibration in
the X direction.
cdelt1 : float or None
CDELT1 parameter corresponding to wavelength calibration in
the X direction.
ctype1 : str or None
CTYPE1 parameter corresponding to wavelength calibration in
the X direction.
cunit1 : str or None
CUNIT1 parameter corresponding to wavelength calibration in
the X direction.
grid : bool
If True, overplot grid.
cmap : string
Color map to be employed.
cbar_label : string
Color bar label.
cbar_orientation : string or None
Color bar orientation: valid options are 'horizontal' or
'vertical' (or None for no color bar).
Returns
-------
ax : axes object
Matplotlib axes instance. Note that this value must also
be provided as input.
"""
if ax is None:
raise ValueError('ax=None is not valid in this function')
naxis2_, naxis1_ = image2d.shape
if image_bbox is None:
nc1, nc2, ns1, ns2 = 1, naxis1_, 1, naxis2_
else:
nc1, nc2, ns1, ns2 = image_bbox
if 1 <= nc1 <= nc2 <= naxis1_:
pass
else:
raise ValueError("Invalid bounding box limits")
if 1 <= ns1 <= ns2 <= naxis2_:
pass
else:
raise ValueError("Invalid bounding box limits")
# plot limits
xmin = float(nc1) - 0.5
xmax = float(nc2) + 0.5
ymin = float(ns1) - 0.5
ymax = float(ns2) + 0.5
image2d_region = image2d[(ns1 - 1):ns2, (nc1 - 1):nc2]
if vmin is None or vmax is None:
z1, z2 = ZScaleInterval().get_limits(image2d_region)
else:
z1, z2 = None, None
if vmin is None:
vmin = z1
elif vmin == 'min':
vmin = image2d_region.min()
if vmax is None:
vmax = z2
elif vmax == 'max':
vmax = image2d_region.max()
im_show = ax.imshow(
image2d_region,
cmap=cmap, aspect=aspect, vmin=vmin, vmax=vmax,
interpolation="nearest", origin="lower",
extent=[xmin, xmax, ymin, ymax]
)
if cbar_orientation in ['horizontal', 'vertical']:
import matplotlib.pyplot as plt
plt.colorbar(im_show, shrink=1.0,
label=cbar_label, orientation=cbar_orientation,
ax=ax)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(grid)
if title is not None:
ax.set_title(title)
wavecalib = check_wavelength_scale(
crval1=crval1, cdelt1=cdelt1, ctype1=ctype1, cunit1=cunit1
)
if wavecalib:
if crpix1 is None:
crpix1 = 1.0
xminwv = crval1 + (xmin - crpix1) * cdelt1
xmaxwv = crval1 + (xmax - crpix1) * cdelt1
ax2 = ax.twiny()
ax2.grid(False)
ax2.set_xlim(xminwv, xmaxwv)
ax2.set_xlabel('Wavelength (Angstroms)')
return ax
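# --- Illustrative sketch (not part of the original module) -----------------
# jimshow() draws on an externally created axes object, which makes it easy
# to build multi-panel figures; the synthetic data below are made up.
def _demo_jimshow():
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    image2d = np.random.poisson(lam=100.0, size=(100, 100)).astype(float)
    jimshow(image2d, ax=ax1, title='raw', cbar_orientation=None)
    jimshow(image2d.T, ax=ax2, title='transposed', vmin='min', vmax='max',
            cbar_orientation=None)
    plt.tight_layout()
    plt.show()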
def jimshowfile(filename,
extnum=1,
ax=None,
title=None,
vmin=None, vmax=None,
image_bbox=None,
aspect=GLOBAL_ASPECT,
xlabel='image pixel in the X direction',
ylabel='image pixel in the Y direction',
crpix1=None, crval1=None, cdelt1=None, ctype1=None, cunit1=None,
grid=False,
cmap='hot',
cbar_label='Number of counts',
cbar_orientation='horizontal'):
"""Auxiliary function to display a FITS image via axes object.
Parameters
----------
filename : string
Input FITS file name.
extnum : int
Extension number (1: primary)
ax : axes object
Matplotlib axes instance. Note that this value is also
employed as output.
title : string
Plot title.
vmin : float, 'min', or None
Background value. If None, the minimum zcut is employed.
vmax : float, 'max', or None
Foreground value. If None, the maximum zcut is employed.
image_bbox : tuple (4 integers)
Image rectangle to be displayed, with indices given by
(nc1,nc2,ns1,ns2), which correspond to the numpy array:
image2d[(ns1-1):ns2,(nc1-1):nc2].
aspect : str
Control the aspect ratio of the axes. Valid values are 'equal'
and 'auto'.
xlabel : string
X-axis label.
ylabel : string
Y-axis label.
crpix1 : float or None
CRPIX1 parameter corresponding to wavelength calibration in
the X direction.
crval1 : float or None
CRVAL1 parameter corresponding to wavelength calibration in
the X direction.
cdelt1 : float or None
CDELT1 parameter corresponding to wavelength calibration in
the X direction.
ctype1 : str or None
CTYPE1 parameter corresponding to wavelength calibration in
the X direction.
cunit1 : str or None
CUNIT1 parameter corresponding to wavelength calibration in
the X direction.
grid : bool
If True, overplot grid.
cmap : string
Color map to be employed.
cbar_label : string
Color bar label.
cbar_orientation : string or None
Color bar orientation: valid options are 'horizontal' or
'vertical' (or None for no color bar).
Returns
-------
ax : axes object
Matplotlib axes instance. Note that this value must also
be provided as input.
"""
# read input FITS file
hdulist = fits.open(filename)
if extnum is None or extnum < 1 or extnum > len(hdulist):
raise ValueError(f'Unexpected extension number {extnum}')
image2d = hdulist[extnum - 1].data
hdulist.close()
return jimshow(image2d,
ax=ax,
title=title,
vmin=vmin, vmax=vmax,
image_bbox=image_bbox,
aspect=aspect,
xlabel=xlabel,
ylabel=ylabel,
crpix1=crpix1, crval1=crval1, cdelt1=cdelt1,
ctype1=ctype1, cunit1=cunit1,
grid=grid,
cmap=cmap,
cbar_label=cbar_label,
cbar_orientation=cbar_orientation)
def main(args=None):
# parse command-line options
parser = argparse.ArgumentParser(
description='description: display FITS images'
)
# positional arguments
parser.add_argument("filename",
help="FITS file (wildcards allowed) "
"or txt file with list of FITS files",
nargs="+")
# optional arguments
parser.add_argument('--extnum',
help='Extension number in input files (note that ' +
'first extension is 1 = default value)',
default=1, type=int)
parser.add_argument("--z1z2",
help="tuple [z1,z2], minmax or None (use zscale)",
type=str)
parser.add_argument("--bbox",
help="bounding box tuple: nc1,nc2,ns1,ns2")
parser.add_argument("--firstpix",
help="coordinates of lower left pixel: nc0, ns0")
parser.add_argument("--aspect",
help="aspect ratio (equal or auto)",
type=str,
choices=['equal', 'auto'], default=GLOBAL_ASPECT)
parser.add_argument("--cbar_label",
help="color bar label",
type=str, default='Number of counts')
parser.add_argument("--cbar_orientation",
help="color bar orientation",
type=str,
choices=['horizontal', 'vertical', None],
default='horizontal')
parser.add_argument("--keystitle",
help="tuple of FITS keywords.format: " +
"key1,key2,...keyn.'format'")
parser.add_argument("--ds9reg",
help="ds9 region file to be overplotted",
type=argparse.FileType('rt'))
parser.add_argument("--geometry",
help='string "x,y,dx,dy"',
default=GLOBAL_GEOMETRY)
parser.add_argument("--pdffile",
help="output PDF file name",
type=argparse.FileType('w'))
parser.add_argument("--figuredict",
help="string with dictionary of parameters for"
"plt.figure()",
type=str)
parser.add_argument("--debugplot",
help="Integer indicating plotting/debugging" +
" (default=12)",
default=12, type=int,
choices=[0, 1, 2, 10, 11, 12, 21, 22])
args = parser.parse_args(args)
if abs(args.debugplot) in [21, 22]:
print('>> args.filename: ', args.filename)
if len(args.filename) == 1:
list_fits_files = []
list_extnum = []
for tmp in list_fileinfo_from_txt(args.filename[0]):
list_fits_files.append(tmp.filename)
list_extnum.append(tmp.extnum)
else:
list_fits_files = []
list_extnum = []
for tmp in args.filename:
tmpfile, tmpextnum = check_extnum(tmp)
for tmptmp in list_fileinfo_from_txt(tmpfile):
list_fits_files.append(tmptmp.filename)
list_extnum.append(tmpextnum)
list_extnum = [args.extnum if dum is None else dum for dum in list_extnum]
if abs(args.debugplot) in [21, 22]:
print('>> Filenames.: ', list_fits_files)
print('>> Extensions: ', list_extnum)
# read pdffile
if args.pdffile is not None:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages(args.pdffile.name)
else:
from numina.array.display.matplotlib_qt import plt
pdf = None
for myfile, extnum in zip(list_fits_files, list_extnum):
if extnum is None:
extnum = args.extnum
ximshow_file(singlefile=myfile,
extnum=extnum,
args_z1z2=args.z1z2,
args_bbox=args.bbox,
args_firstpix=args.firstpix,
args_aspect=args.aspect,
args_cbar_label=args.cbar_label,
args_cbar_orientation=args.cbar_orientation,
args_keystitle=args.keystitle,
args_ds9reg=args.ds9reg,
args_geometry=args.geometry,
pdf=pdf,
args_figuredict=args.figuredict,
debugplot=args.debugplot)
if pdf is not None:
pdf.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
Barmaley-exe/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
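# A minimal usage sketch of the wrappers defined below (the matrix is
# illustrative only and the printed values are indicative):
#
#   >>> import numpy as np
#   >>> A = np.diag([1., 2., 3., 4., 5.])
#   >>> vals, vecs = eigsh(A, k=2, sigma=0)  # shift-invert: eigenvalues near 0
#   >>> np.sort(vals)                        # doctest: +SKIP
#   array([ 1.,  2.])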
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
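# As a concrete sketch, the _eigsh() front-end in this module wires up
# shift-invert ('normal') mode roughly as follows (see _eigsh for the exact
# logic; keyword names match this constructor's signature):
#
#   Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol)
#   params = _SymmetricArpackParams(n, k, A.dtype.char, matvec=None, mode=3,
#                                   M_matvec=M_matvec, Minv_matvec=Minv_matvec,
#                                   sigma=sigma)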
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
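# In the _eigs() front-end below, the OPpart keyword selects between these
# real shift-invert variants: OPpart in (None, 'r') -> mode=3 (real part),
# OPpart == 'i' -> mode=4 (imaginary part); a complex A always uses mode=3.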
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got at most as many eigenvalues as we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
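# Summary of the dispatch above: dense A/M go through a dense LU factorization
# (LuInv), sparse matrices through a sparse LU on CSC (SpLuInv), and anything
# else (general linear operators) through an iterative solve with gmres
# (IterInv / IterOpInv).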
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
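# A small usage sketch for the SVD helper (the input matrix is illustrative;
# the shapes follow from the code above, output values are data-dependent):
#
#   >>> import numpy as np
#   >>> X = np.random.RandomState(0).rand(20, 5)
#   >>> u, s, vh = svds(X, k=3)                      # doctest: +SKIP
#   >>> u.shape, s.shape, vh.shape                   # doctest: +SKIP
#   ((20, 3), (3,), (3, 5))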
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
tallakahath/pymatgen | pymatgen/io/abinit/tests/test_abiinspect.py | 4 | 3870 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.abiinspect import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class YamlTokenizerTest(PymatgenTest):
"""Test YamlTokenizer."""
def test_base(self):
string = \
"""---
none: [~, null]
bool: [true, false, on, off]
int: 42
float: 3.14159
list: [LITE, RES_ACID, SUS_DEXT]
dict: {hp: 13, sp: 5}
...
this is not a YAML document!
and the tokenizer will ignore it
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
...
This is not a proper document since it does not start with ---
the end tag below is ignored
...
--- !Monster
name: Dragon
hp: [2,6] # 2d6
ac: 32
attacks: [BITE, HURT]
...
"""
#for i, line in enumerate(string.splitlines()): print(i, line)
fd, filename = tempfile.mkstemp(text=True)
with open(filename, "w") as fh:
fh.write(string)
doc_tags = [None, "!Monster", "!Monster"]
doc_linenos = [1, 13, 23]
with YamlTokenizer(filename) as r:
# Iterate the docs
n = 0
for i, doc in enumerate(r):
n += 1
print("doc", doc)
self.assertTrue(doc.tag == doc_tags[i])
self.assertTrue(doc.lineno == doc_linenos[i])
self.assertTrue(n == len(doc_tags))
# Read all docs present in the file.
r.seek(0)
all_docs = r.all_yaml_docs()
#print(all_docs)
self.assertTrue(len(all_docs) == 3)
# We should be at the beginning of the file.
self.assertTrue(all_docs == r.all_yaml_docs())
# Find documents by tag.
r.seek(0)
monster = r.next_doc_with_tag("!Monster")
#print("monster",monster)
self.assertTrue(monster == all_docs[1])
monster = r.next_doc_with_tag("!Monster")
self.assertTrue(monster == all_docs[2])
# this should raise StopIteration
with self.assertRaises(StopIteration):
monster = r.next_doc_with_tag("!Monster")
# os.remove(filename)
class AbinitInpectTest(PymatgenTest):
def test_scfcycle(self):
"""Testing ScfCycle."""
cycle = GroundStateScfCycle.from_file(ref_file("mgb2_scf.abo"))
print(cycle)
assert cycle.num_iterations == 6
last = cycle.last_iteration
assert last["Etot(hartree)"] == -7.1476241568657 and last["vres2"] == 3.879E-08
assert list(cycle["vres2"]) == [1.769E+02, 7.920E-01, 1.570E-01, 4.259E-03, 4.150E-05, 3.879E-08]
if have_matplotlib:
cycle.plot(show=False)
def test_relaxation(self):
"""Testing Relaxation object."""
relaxation = Relaxation.from_file(ref_file("sic_relax.abo"))
print(relaxation)
assert len(relaxation) == 4
assert relaxation[0]["Etot(hartree)"][-1] == -8.8077409200473
assert relaxation[-1]["Etot(hartree)"][-1] == -8.8234906607147
for scf_step in relaxation:
print(scf_step.num_iterations)
if have_matplotlib:
relaxation.plot(show=False)
if __name__ == '__main__':
import unittest
unittest.main()
| mit |