repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
SteVwonder/advent_of_code | 2016/day01/day01.py | 1 | 2848 | class Position(object):
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __iter__(self):
yield self.x
yield self.y
def __str__(self):
return "<{}, {}>".format(self.x, self.y)
@property
def tuple(self):
return (self.x, self.y)
@property
def blocks_from_origin(self):
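# Manhattan (taxicab) distance from the origin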
return sum([abs(x) for x in iter(self)])
class Walker(object):
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
def __init__(self):
self.position = Position(0,0)
self.direction = Walker.NORTH
self.previous_locations = set([self.position.tuple])
#self.previous_locations = [self.position.tuple]
def turn(self, direction_str):
if direction_str == 'L':
self.direction -= 1
elif direction_str == 'R':
self.direction += 1
else:
raise RuntimeError('Unknown direction: {}'.format(direction_str))
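# wrap around so the heading stays within NORTH(0)..WEST(3)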
self.direction = self.direction % 4
def walk(self, length):
if self.direction == Walker.NORTH:
self.position.y += length
elif self.direction == Walker.EAST:
self.position.x += length
elif self.direction == Walker.SOUTH:
self.position.y -= length
elif self.direction == Walker.WEST:
self.position.x -= length
else:
raise RuntimeError('Invalid direction: {}'.format(self.direction))
def check_overlap(self):
if self.position.tuple in self.previous_locations:
return self.position.tuple
return None
def walk_and_check_overlap(self, length):
overlap = None
for x in xrange(length):
self.walk(1)
if overlap is None:
overlap = self.check_overlap()
self.previous_locations.add(self.position.tuple)
return overlap
@property
def blocks_from_origin(self):
return self.position.blocks_from_origin
def plot_path(self):
import matplotlib.pyplot as plt
xs, ys = zip(*self.previous_locations)
plt.plot(xs, ys)
plt.show()
def main():
with open('input.txt', 'r') as fp:
line = fp.readline().rstrip()
instructions = line.split(', ')
walker = Walker()
first_overlap_distance = None
for instruction in instructions:
direction = instruction[0]
length = int(instruction[1:])
walker.turn(direction)
overlap = walker.walk_and_check_overlap(length)
if first_overlap_distance is None and overlap is not None:
first_overlap_distance = Position(*overlap).blocks_from_origin
print "Part 1: {}".format(walker.blocks_from_origin)
print walker.position.tuple
print "Part 2: {}".format(first_overlap_distance)
if __name__ == "__main__":
main()
| mit |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
Return a list of file extensions that are synonyms for
the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
import _winreg
except ImportError:
pass # Fall through to default
else:
try:
user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return _winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
_winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames is returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
import _winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
files = []
for ext in fontext:
files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
return files
try:
for j in range(_winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = _winreg.EnumValue( local, j)
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
return items.keys()
finally:
_winreg.CloseKey(local)
return None
def OSXFontDirectory():
"""
Return the system font directories for OS X. This is done by
starting at the list of hardcoded paths in
:attr:`OSXFontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in OSXFontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directory is None:
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if fontext is None:
files.extend(glob.glob(os.path.join(path,'*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
return files
def x11FontDirectory():
"""
Return the system font directories for X11. This is done by
starting at the list of hardcoded paths in
:attr:`X11FontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in X11FontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
applications, without needing to know where all of them reside.
"""
try:
import commands
except ImportError:
return {}
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
status, output = commands.getstatusoutput("fc-list file")
if status == 0:
for line in output.split('\n'):
fname = line.split(':')[0]
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
available. A list of TrueType fonts is returned by default with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = x11FontDirectory()
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, (str, unicode)):
fontpaths = [fontpaths]
for path in fontpaths:
files = []
for ext in fontexts:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, str):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError, 'weight not a valid integer'
return weight
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def ttfFontProperty(font):
"""
A function for populating the :class:`FontEntry` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in weight_dict.keys():
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
A function for populating a :class:`FontEntry` instance by
extracting information from the AFM font file.
*font* is a :class:`AFM` instance.
"""
name = font.get_familyname()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'r')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
prop = afmFontProperty(fpath, font)
else:
try:
font = ft2font.FT2Font(str(fpath))
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try: prop = ttfFontProperty(font)
except: continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
- size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g. 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g. 'large', instead of absolute font sizes, e.g. 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size, i.e. by using the
:meth:`FontManager.set_default_size` method.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = self.__dict__.items()
l.sort()
return hash(repr(l))
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(str(findfont(self))).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = fontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', or a real font name.
"""
if family is None:
self._family = None
else:
if is_string_like(family):
family = [family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is not None:
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is not None:
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g. 12.
"""
if size is not None:
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in self._parse_fontconfig_pattern(pattern).items():
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in d.values():
for styled in named.values():
for variantd in styled.values():
for weightd in variantd.values():
for stretchd in weightd.values():
for fname in stretchd.values():
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'w')
try:
pickle.dump(data, fh)
finally:
fh.close()
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'r')
try:
data = pickle.load(fh)
finally:
fh.close()
return data
class FontManager:
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
def __init__(self, size=None, weight='normal'):
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont = fname
break
else:
# use anything
self.defaultFont = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
if rcParams['pdf.use14corefonts']:
# Load only the 14 PDF core fonts. These fonts do not need to be
# embedded; every PDF viewing application is required to have them:
# Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
# Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
# Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
# ZapfDingbats.
afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
afmfiles = findSystemFonts(afmpath, fontext='afm')
self.afmlist = createFontList(afmfiles, fontext='afm')
else:
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
self.ttf_lookup_cache = {}
self.afm_lookup_cache = {}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
def get_default_size(self):
"""
Return the default font size.
"""
if self.default_size is None:
return rcParams['font.size']
return self.default_size
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def set_default_size(self, size):
"""
Set the default font size in points. The initial value is set
by ``font.size`` in rc.
"""
self.default_size = size
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match anywhere in the list returns 0.0.
A match by generic font name will return 0.1.
No match will return 1.0.
"""
for i, family1 in enumerate(families):
if family1.lower() in font_family_aliases:
if family1 == 'sans':
family1 = 'sans-serif'
options = rcParams['font.' + family1]
if family2 in options:
idx = options.index(family2)
return 0.1 * (float(idx) / len(options))
elif family1.lower() == family2.lower():
return 0.0
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
# Size value should have already been resolved to a number by set_size();
# fall back to the relative size names if it was not.
try:
sizeval1 = float(size1)
except ValueError:
sizeval1 = self.default_size * font_scalings.get(size1)
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf'):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the lowest (best) score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
debug = False
if prop is None:
return self.defaultFont
if is_string_like(prop):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
font_cache = self.afm_lookup_cache
fontlist = self.afmlist
else:
font_cache = self.ttf_lookup_cache
fontlist = self.ttflist
cached = font_cache.get(hash(prop))
if cached:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
verbose.report('findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont))
result = self.defaultFont
else:
verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, best_font.fname, best_score))
result = best_font.fname
font_cache[hash(prop)] = result
return result
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
fd = open(filename, 'rb')
tag = fd.read(4)
fd.close()
result = (tag == 'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
import commands
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
if status == 0:
for match in _fc_match_regex.finditer(output):
file = match.group(1)
if os.path.splitext(file)[1][1:] in fontexts:
return file
return None
_fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = os.path.join(get_configdir(), 'fontList.cache')
fontManager = None
def _rebuild():
global fontManager
fontManager = FontManager()
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
try:
fontManager = pickle_load(_fmcache)
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
if not os.path.exists(font):
verbose.report("%s returned by pickled fontManager does not exist" % font)
_rebuild()
font = fontManager.findfont(prop, **kw)
return font
| agpl-3.0 |
tdhopper/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
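# For n_subsamples=2 and very large n_samples the breakdown point approaches
# 1 - 1/sqrt(2), roughly 0.293, which is what the assertion below checks.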
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
akullpp/ClusterPy | src/bhmm.py | 1 | 9444 | from codecs import open
from random import randint, uniform
from collections import defaultdict
from math import log
from utility import change_count
from utility import get_value
class BHMM(object):
""" Bayesian Hidden Markov Model with Gibbs sampling. """
def __init__(self, args):
# Input file
self.fin = args.input
# Output file
self.fout = args.output
# Number of possible labels
self.labels = args.labels
# Number of sampling iterations
self.iterations = args.iterations
# self.transition hyperparameter
self.alpha = args.alpha
# self.emission hyperparameter
self.beta = args.beta
# Lists of observations
self.data = []
# Uniform distribution of observations
self.frequencies = defaultdict(float)
# Delimits single observation
self.delimiter = " "
# Emission matrix: C(previous label, label)
self.emission = defaultdict(int)
# Transition Matrix: C(label, emission)
self.transition = defaultdict(int)
# Lists labels sequences
self.sequences = []
# Base probability
self.base_probability = 1.0 / self.labels
# Logarithmic likelihoods for each iteration
self.likelihoods = []
def __read_data(self):
""" Creates a uniform distribution. """
print "Reading corpus"
with open(self.fin, encoding="utf8") as f:
for line in f:
# Sequence of observations
unit = ["START"]
for item in line.split(self.delimiter):
item = item.strip()
unit.append(item)
self.frequencies[item] += 1.0
unit.append("END")
self.data.append(unit)
def __create_frequencies(self):
""" Calculate relative frequency. """
print "Creating frequencies"
# Total number of observations
total = sum(self.frequencies.values())
for key in self.frequencies.keys():
self.frequencies[key] /= total
def __create_matrixes(self):
""" Creates transition and emission matrix. """
print "Creating matrixes"
for unit in self.data:
# Ordered list of hidden labels framed by -1
sequence = [-1]
for observation in unit[1:-1]:
# Assign random label to observation
label = randint(0, self.labels - 1)
# Add C(label|previous label)
change_count(self.transition, label, sequence[-1], 1)
# Add C(emission|label)
change_count(self.emission, observation, label, 1)
sequence.append(label)
# Last transition add C(-1|previous label)
change_count(self.transition, "-1", sequence[-1], 1)
sequence.append(-1)
# Add sequence of observations list of sequences
self.sequences.append(sequence)
def __initialize_model(self):
""" Initializes the HMM """
print "Initializing model"
self.__read_data()
print "Corpus read"
self.__create_frequencies()
print "Frequencies created"
self.__create_matrixes()
print "Matrixes created"
def __compute_probability(self, matrix, items, base, hyper):
""" Calculating posterior.
Arguments:
matrix -- transition or emission
items -- (hypothesis, evidence)
base -- base probability
hyper -- hyperparameter
"""
x = get_value(matrix, items[0], items[1])
y = get_value(matrix, items[1])
return (x + base * hyper) / (y + hyper)
def __sample_label(self, probabilities):
""" Sample label.
Arguments:
probabilities -- probabilities of all labels
"""
z = sum(probabilities)
remaining = uniform(0, z)
for probability in probabilities:
remaining -= probability
if remaining <= 0:
return probabilities.index(probability)
def __compute_label_probabilities(self, blanket):
""" Computes the probability of each label.
Arguments:
blanket -- Markov blanket
"""
_, previous_label, following_label, current_observation = blanket
# Probabilities of each possible label
probabilities = []
for label in xrange(self.labels):
# Chain rule
probability = (self.__compute_probability(self.transition,
(label, previous_label),
self.base_probability,
self.alpha) *
self.__compute_probability(self.transition,
(following_label, label),
self.base_probability,
self.alpha) *
self.__compute_probability(self.emission,
(current_observation, label),
self.frequencies[current_observation],
self.beta))
probabilities.append(probability)
return probabilities
def __write_labeled_data(self):
""" Writes labeled data to output file. """
print "Writing data"
with open(self.fout, "w", encoding="utf8") as f:
for i in xrange(len(self.data)):
labeled_unit = []
for j in xrange(len(self.sequences[i])):
labeled_unit.append("%s/%s" % (self.data[i][j],
self.sequences[i][j]))
f.write("%s\n" % " ".join(labeled_unit[1:-1]))
def draw_likelihood(self):
""" Draw the logarithmic likelihood. """
from matplotlib import pyplot
print "Drawing likelihood"
pyplot.plot(self.likelihoods[::self.iterations / 100])
pyplot.show()
def __compute_label_likelihood(self, blanket):
""" Likelihood of label sequence.
Arguments:
blanket - Markov blanket
"""
current_label, previous_label, following_label, current_observation = blanket
# P(label|previous label)
p = self.__compute_probability(self.transition,
(current_label, previous_label),
self.base_probability,
self.alpha) * \
self.__compute_probability(self.emission,
(current_observation, current_label),
self.frequencies[current_observation],
self.beta)
# Last transition
if following_label == -1:
# P(following label|label)
p *= self.__compute_probability(self.transition,
(following_label, current_label),
self.base_probability,
self.alpha)
return p
def __change_sample(self, blanket, i):
""" Adds (i = 1) or removes (i = -1) a sample.
Arguments:
blanket -- affected labels
i -- add or remove
"""
current_label, previous_label, following_label, current_observation = blanket
change_count(self.transition, current_label, previous_label, i)
change_count(self.transition, following_label, current_label, i)
change_count(self.emission, current_observation, current_label, i)
def run(self):
""" Gibbs sampling. """
self.__initialize_model()
print "Model initialized\nStarting iterations\n"
for _ in xrange(self.iterations):
likelihood = 0.0
for i, sequence in enumerate(self.sequences):
for j in xrange(1, len(self.sequences[i]) - 1):
# Markov blanket affected by changing label
blanket = [sequence[j],
sequence[j - 1],
sequence[j + 1],
self.data[i][j]]
# Remove sample
self.__change_sample(blanket, -1)
# Probabilities of each label
probabilities = self.__compute_label_probabilities(blanket)
# Sample current label
sequence[j] = self.__sample_label(probabilities)
# Update blanket
blanket[0] = sequence[j]
# Likelihood of current label
p = self.__compute_label_likelihood(blanket)
likelihood += log(p)
# Add sample
self.__change_sample(blanket, 1)
print "Iteration %s \t Likelihood %f" % (_ + 1, likelihood)
self.likelihoods.append(likelihood)
print "\nIterations finished"
self.__write_labeled_data()
print "Data written" | mit |
phamngtuananh/Singaboat_RobotX2016 | robotx_nav/nodes/task2_toplevel_try.py | 3 | 4644 | #!/usr/bin/env python
import multiprocessing as mp
import rospy
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
import numpy as np
from sklearn.cluster import KMeans, DBSCAN
from sklearn import svm
from move_base_loiter import Loiter
from move_base_waypoint import MoveTo
from color_totem_planner import ColorTotemPlanner
# import tf
# from math import pi, cos, sin
# from move_base_util import MoveBaseUtil
import time
def loiter_worker(v_dict_q, q):
""" go to gps point """
p = mp.current_process()
print p.name, p.pid, 'Starting'
loiter_obj = Loiter("loiter", is_newnode=True, target=None,
radius=2.5, polygon=4, is_ccw=True, is_relative=False)
visited_dict = {"red": False, "green": False, "blue": False, "yellow": False}
# spawn the gps coordinate, one time only
while True:
cid, target, radius, polygon, is_ccw = q.get()
print "from planner", target
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
loiter_obj.respawn(target, polygon, radius, is_ccw)
visited_dict[cid] = True
v_dict_q.put(visited_dict) # dont hold moveto
print p.name, p.pid, 'Exiting'
def moveto_worker(q, hold_move_q):
""" constant heading to pass the gate,
need roi_target_identifier to give/update waypoint """
p = mp.current_process()
print p.name, p.pid, 'Starting'
# get the waypoints, loop wait for updates
moveto_obj = MoveTo("moveto", is_newnode=True, target=None, is_relative=False)
while True:
target = q.get()
print target
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
moveto_obj.respawn(target)
hold_move_q.put(False)
print p.name, p.pid, 'Exiting'
# not required
# def cancel_goal_worker(conn, repetition):
# """ asynchronously cancel goals"""
# p = mp.current_process()
# print p.name, p.pid, 'Starting'
# while True:
# command = conn.recv()
# print 'child: ', command
# if command == 'cancel': # cancel goal
# print 'doing cancelling'
# force_cancel = ForceCancel(nodename="forcecancel", repetition=repetition)
# conn.send('cancelled')
# elif command == 'exit': # complete
# print "cancel goal complete, exit"
# break
# else: # conn.recv() == 0, idle, wait for command
# pass
# time.sleep()
#
# print p.name, p.pid, 'Exiting'
def planner_worker(v_dict_q, loiter_q, moveto_q, hold_move_q, cancel_conn):
""" plan for totems """
p = mp.current_process()
print p.name, p.pid, 'Starting'
planner_obj = ColorTotemPlanner("color_planner")
while True:
if not v_dict_q.empty(): # get update from loiter on visited
visited_dict = v_dict_q.get()
planner_obj.update_visit(visited_dict) # update visited
if not hold_move_q.empty(): # get update from moveto on success
hol = hold_move_q.get() # free moveto to be on hold after moveto finished
planner_obj.update_hold_moveto(hol)
isready, loiter_target, moveto_target, allvisited, hold_moveto = planner_obj.planner() # try to find onhold loiter target
# print isready
if allvisited: # all visited, kill all worker and exit
poison_pill = [0, 0, -float("inf")]
loiter_q.put([None, poison_pill, None, None, None])
# need an exit target
if moveto_target != []:
moveto_q.put(moveto_target)
# finally kill moveto
time.sleep(1)
moveto_q.put(poison_pill)
break
elif isready and not allvisited and loiter_target != []: # still have pending loiter points
print "loiter called"
loiter_q.put(loiter_target)
elif not isready and not hold_moveto and not allvisited and moveto_target != []: # need to explore for valid loiter points
print "moveto called"
moveto_q.put(moveto_target)
print p.name, p.pid, 'Exiting'
if __name__ == "__main__":
moveto_q = mp.Queue()
hold_moveto_q = mp.Queue()
cancel_p_conn, cancel_c_conn = mp.Pipe()
# loiter_p_conn, loiter_c_conn = mp.Pipe()
loiter_q = mp.Queue(1)
v_dict_q = mp.Queue(1)
# manager = mp.Manager()
# visited_dict = manager.dict()
# visited_dict = {"red": False, "green": False, "blue": False, "yellow": False}
loiter_mp = mp.Process(name="ltr", target=loiter_worker, args=(v_dict_q, loiter_q,))
moveto_mp = mp.Process(name="mvt", target=moveto_worker, args=(moveto_q, hold_moveto_q,))
# cancel_goal_mp = mp.Process(name="ccg", target=cancel_goal_worker, args=(cancel_p_conn, 5,))
planner_mp = mp.Process(name="pln", target=planner_worker, args=(v_dict_q, loiter_q, moveto_q, hold_moveto_q, cancel_c_conn,))
loiter_mp.start()
moveto_mp.start()
# cancel_goal_mp.start()
planner_mp.start()
# close
loiter_mp.join()
moveto_mp.join()
planner_mp.join()
| gpl-3.0 |
msbeta/apollo | modules/tools/mapshow/libs/subplot_traj_path.py | 3 | 2931 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
class TrajPathSubplot:
def __init__(self, ax):
self.ax = ax
self.path_lines = []
self.path_lines_size = 30
self.colors = []
self.init_colors()
# self.colors = ['b','r', 'y', 'k']
for i in range(self.path_lines_size):
line, = ax.plot(
[0], [0],
c=self.colors[i % len(self.colors)],
ls="-",
marker='',
lw=8,
alpha=0.3)
self.path_lines.append(line)
ax.set_xlabel("x (m)")
# ax.set_xlim([-2, 10])
# ax.set_ylim([-6, 6])
self.ax.autoscale_view()
# self.ax.relim()
# ax.set_ylabel("y (m)")
ax.set_title("PLANNING ACC")
self.set_visible(False)
def init_colors(self):
self.colors = []
values = range(self.path_lines_size)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def set_visible(self, visible):
for line in self.path_lines:
line.set_visible(visible)
def show(self, planning):
planning.traj_data_lock.acquire()
for i in range(len(planning.traj_path_x_history)):
if i >= self.path_lines_size:
print "WARNING: number of path lines is more than " \
+ str(self.path_lines_size)
continue
speed_line = self.path_lines[self.path_lines_size - i - 1]
speed_line.set_xdata(planning.traj_path_x_history[i])
speed_line.set_ydata(planning.traj_path_y_history[i])
speed_line.set_visible(True)
# self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
# self.ax.axis('equal')
planning.traj_data_lock.release()
self.ax.autoscale_view()
self.ax.relim()
| apache-2.0 |
ufieeehw/IEEE2015 | ros/ieee2015_localization/nodes/localize_lie.py | 1 | 9753 | #!/usr/bin/env python
from __future__ import division
import cv2
import rospy
import numpy as np
import tf.transformations as tf_trans
from std_msgs.msg import Header
from geometry_msgs.msg import Twist, Point, Pose, PoseStamped, Quaternion
import roslib
roslib.load_manifest('ieee2015_vision')
from ros_image_tools import Image_Subscriber
roslib.load_manifest('ieee2015_localization')
from slam.registration import similarity, translation, similarity_fast
from time import time
import math
from matplotlib import pyplot
from collections import deque
'''To use this, do:
rosrun ieee2015_simulator view_simulation
rosrun ieee2015_localization localize_lie.py
'''
DEBUG = True
class Keyframe(object):
def __init__(self):
pass
class Localization(object):
def __init__(self):
rospy.init_node('localization')
self.map_size = 750
self.reset()
self.pose_pub = rospy.Publisher('pose', PoseStamped, queue_size=3)
self.desired_pose_pub = rospy.Publisher('desired_pose', PoseStamped, queue_size=3)
# Size we shrink incoming images to (k x k)
self.shrink_size = 200
self.ones_mask = np.ones((self.shrink_size, self.shrink_size))
self.image_scale = 0.5 # How much we shrink (i.e. 0.5 as many pixels represent same length)
self.world_scale = (0.3 / 45) * (self.image_scale * 2)
self.image_sub = Image_Subscriber(
'/robot/base_camera/down_view',
self.image_cb_matrix,
encoding="8UC1",
)
'''
if DEBUG:
cv2.namedWindow("map")
cv2.setMouseCallback("map", self.on_mouse)
'''
def imshow(self, name, image):
if DEBUG:
cv2.imshow(name, image)
def on_mouse(self, *args):
if args[0] == 1:
self.des_pos = np.array([args[1], args[2]]) - (462, 571)
# - self.keyframe_position) - (342, 395)
# self.publish_desired_pose(-self.des_pos[0], -self.des_pos[1], 0.0)
def reset(self):
'''
odom_h_i2root -> this is the 'odometric' frame, it's only "okay", and subject to drift
'''
self.full_map = np.zeros((self.map_size, self.map_size), np.uint8)
self.keyframe_image = None
self.keyframe_scale = 1.0
self.keyframe_orientation = 0.0
self.keyframe_position = (self.map_size // 2, self.map_size // 2)
self.h_k2root = np.eye(3)
self.odom_h_i2root = np.eye(3)
def reset_map(self):
self.full_map = np.zeros((self.map_size, self.map_size), np.uint8)
def motion_from_matrix(self, matrix):
# assert matrix.shape == (3, 3), "Requires 3x3 matrix in homogeneous space"
a = matrix[0, 0]
b = matrix[0, 1]
c = matrix[1, 0]
d = matrix[1, 1]
scale_x = np.sign(a) * np.sqrt((a**2) + (b**2))
scale_y = np.sign(d) * np.sqrt((c**2) + (d**2))
angle = -np.arctan2(-b, a)
translation_x = matrix[0, 2]
translation_y = matrix[1, 2]
return angle, translation_x, translation_y, scale_x, scale_y
def warp(self, image, matrix):
matrix = matrix[:2, :]
return cv2.warpAffine(image, matrix, image.shape)
def make_homogeneous(self, matrix):
assert max(matrix.shape) == 3, "Matrix must have 3 columns!"
return np.vstack([matrix, [0, 0, 1]])
def make_2D_rotation(self, angle):
c, s = np.cos(angle), np.sin(angle)
mat = np.matrix([
[c, -s],
[s, c],
],
dtype=np.float32)
return mat
def rotate(self, image, angle, scale=1.0):
rows, cols = image.shape
# rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, scale)
rot_M = cv2.getRotationMatrix2D((0.0 / 2, 0.0 / 2), angle, scale)
rotated = cv2.warpAffine(image, rot_M, (cols, rows))
return rotated
def translate(self, image, tx, ty, scale=1.0):
rows, cols = image.shape
trans_M = np.float32([
[1.0 / scale, 0.0, tx],
[0.0, 1.0 / scale, ty],
])
translated = cv2.warpAffine(image, trans_M, (cols, rows))
return translated
def publish_pose(self, tx, ty, angle):
_orientation = tf_trans.quaternion_from_euler(0, 0, -angle)
self.pose_pub.publish(
PoseStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/course',
),
pose = Pose(
position = Point(-ty * self.world_scale, -tx * self.world_scale, 0.0),
orientation = Quaternion(*_orientation), # Radians
)
)
)
def publish_desired_pose(self, ty, tx, angle):
_orientation = tf_trans.quaternion_from_euler(0, 0, -angle)
self.desired_pose_pub.publish(
PoseStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/course',
),
pose = Pose(
position = Point(tx * self.world_scale, ty * self.world_scale, 0.0),
orientation = Quaternion(*_orientation), # Radians
)
)
)
def overlay(self, image, x_offset, y_offset, other_image):
greater = image > 1
# Image Blending
other_image[
x_offset: x_offset + image.shape[0],
y_offset: y_offset + image.shape[1],
] *= np.ones(image.shape) - (greater * 0.5)
other_image[
x_offset: x_offset + image.shape[0],
y_offset: y_offset + image.shape[1],
] += image * 0.5
def stitch_matrix(self, new_image):
'''stitch_matrix(new_image)
h -> homogeneous
i -> image
k -> keyframe
root -> root
'''
im_to_keyframe = cv2.estimateRigidTransform(new_image, self.keyframe_image, False)
if im_to_keyframe is None:
print '>Failed to map<'
return False, None
h_im_to_keyframe = self.make_homogeneous(im_to_keyframe)
ang_i2k, dx_i2k, dy_i2k, sx, sy = self.motion_from_matrix(im_to_keyframe)
if (np.fabs(sx - 1.0) > 0.01) or (np.fabs(sy - 1.0) > 0.01):
# print sx, sy
return False, h_im_to_keyframe
h_i2root = np.dot(self.h_k2root, h_im_to_keyframe)
ang_i2root, dx_i2root, dy_i2root, sxroot, syroot = self.motion_from_matrix(h_i2root)
rotated = self.rotate(new_image, np.degrees(ang_i2root), scale=sx)
# self.imshow("Rotated", rotated)
self.overlay(
rotated,
self.keyframe_position[0] + dy_i2root,
self.keyframe_position[1] + dx_i2root,
self.full_map
)
self.imshow("map", self.full_map)
return True, h_im_to_keyframe
def largest_contour(self, image):
contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
largest_ctr = max(contours, key=cv2.contourArea)
empty_image = np.zeros(image.shape, np.uint8)
cv2.drawContours(empty_image, [largest_ctr], 0, 255, thickness=-20)
return empty_image
def image_cb_matrix(self, image_msg):
key_press = cv2.waitKey(1)
if key_press & 0xFF == ord('r'):
self.reset()
if key_press & 0xFF == ord('f'):
self.reset_map()
image_msg = np.squeeze(image_msg)
# image_fixed = self.fix_size(image_msg, size=self.shrink_size)
image_fixed = cv2.resize(image_msg, (int(image_msg.shape[0] * self.image_scale), int(image_msg.shape[1] * self.image_scale)))
ret, image = cv2.threshold(image_fixed, 150, 255, cv2.THRESH_BINARY)
image = self.largest_contour(image)
if self.keyframe_image is None:
self.keyframe_image = image
self.keyframe_position = ((self.map_size // 2) - (image.shape[0]//2), (self.map_size // 2) - (image.shape[1]//2))
return
self.imshow("Input Image", image)
tic = time()
good_match, h_i2k = self.stitch_matrix(image)
if h_i2k is None:
return
# We have some kind of match!
ang_i2k, dx_i2k, dy_i2k, sx, sy = self.motion_from_matrix(h_i2k)
# print '----Match Success---'
# Check if we matched well
self.odom_h_i2root = np.dot(self.h_k2root, h_i2k)
ang_i2root, dx_i2root, dy_i2root, sx, sy = self.motion_from_matrix(self.odom_h_i2root)
if np.fabs(sx - 1) < 0.1:
print 'x{} y{}'.format(dx_i2root, dy_i2root)
self.publish_pose(dx_i2root, dy_i2root, ang_i2root)
if good_match is False:
return
# if ((np.linalg.norm([dx_i2k, dy_i2k]) > 4) or
# (np.fabs(ang_i2k) > 0.05)):
print "Keyframing-----"
self.keyframe_image = image
self.h_k2root = np.dot(self.h_k2root, h_i2k)
# This is correct, evidence:
# self.imshow("warped", self.warp(image, self.h_k2root))
print self.h_k2root
def fix_size(self, image, size=200):
'''Takes an image, makes it square, resizes it
Size is an integer, and we will reshape to (size, size)'''
shape = image.shape
square_shape = min(shape)
half_shape = square_shape // 2
center_x, center_y = shape[0] // 2, shape[1] // 2
squared = image[center_x - half_shape:center_x + half_shape, center_y - half_shape:center_y + half_shape]
sized = cv2.resize(squared, (size, size))
return sized
if __name__ == '__main__':
localization = Localization()
rospy.spin() | gpl-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/image_nonuniform.py | 9 | 1488 | '''
This illustrates the NonUniformImage class, which still needs
an axes method interface; either a separate interface, or a
generalization of imshow.
'''
from matplotlib.pyplot import figure, show
import numpy as np
from matplotlib.image import NonUniformImage
from matplotlib import cm
interp='nearest'
x = np.linspace(-4, 4, 9)
x2 = x**3
y = np.linspace(-4, 4, 9)
#print 'Size %d points' % (len(x) * len(y))
z = np.sqrt(x[np.newaxis,:]**2 + y[:,np.newaxis]**2)
fig = figure()
fig.suptitle('NonUniformImage class')
ax = fig.add_subplot(221)
im = NonUniformImage(ax, interpolation=interp, extent=(-4,4,-4,4),
cmap=cm.Purples)
im.set_data(x, y, z)
ax.images.append(im)
ax.set_xlim(-4,4)
ax.set_ylim(-4,4)
ax.set_title(interp)
ax = fig.add_subplot(222)
im = NonUniformImage(ax, interpolation=interp, extent=(-64,64,-4,4),
cmap=cm.Purples)
im.set_data(x2, y, z)
ax.images.append(im)
ax.set_xlim(-64,64)
ax.set_ylim(-4,4)
ax.set_title(interp)
interp = 'bilinear'
ax = fig.add_subplot(223)
im = NonUniformImage(ax, interpolation=interp, extent=(-4,4,-4,4),
cmap=cm.Purples)
im.set_data(x, y, z)
ax.images.append(im)
ax.set_xlim(-4,4)
ax.set_ylim(-4,4)
ax.set_title(interp)
ax = fig.add_subplot(224)
im = NonUniformImage(ax, interpolation=interp, extent=(-64,64,-4,4),
cmap=cm.Purples)
im.set_data(x2, y, z)
ax.images.append(im)
ax.set_xlim(-64,64)
ax.set_ylim(-4,4)
ax.set_title(interp)
show()
| mit |
jramcast/ml_weather | example3/example3.py | 1 | 2363 | """
Test 3
Introduces the use of MLPClassifier.
"""
import csv
import math
from random import shuffle
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score
from frequent import get_most_frequent_terms
print("Reading CSV...")
csvfile = open('data/train.csv', newline='')
datareader = csv.DictReader(csvfile)
data = list(datareader)
print("Shuffling data...")
shuffle(data)
print("Selecting data subset")
data = data[0: 2000]
HOW_MANY_FEATURES = 1000
print("Selecting features based on the most {} common words.".format(HOW_MANY_FEATURES))
most_frequent_terms = list()
def filter_tweets():
# we do not want to use words from the validation set
only_training_tweets = data[0: int(len(data)*0.8)]
for row in only_training_tweets:
yield row['tweet']
most_frequent_terms = get_most_frequent_terms(filter_tweets(), HOW_MANY_FEATURES)
print(most_frequent_terms)
automatic_features = [text for text, times in most_frequent_terms]
print("Generating data features...")
X = []
y = []
for row in data:
keywords_in_tweet = []
state_in_tweet = 0
location_in_tweet = 0
# check whether each keyword is inside tweet
tweet = row['tweet'].lower()
for keyword in automatic_features:
if keyword in tweet:
keywords_in_tweet.append(1)
else:
keywords_in_tweet.append(0)
# check whether state is inside tweet
if row['state'] in row['tweet']:
state_in_tweet = 1
# check whether location is inside tweet
if row['location'] in row['tweet']:
location_in_tweet = 1
# for now, we only use the weather type class
y_type_classes = [row['k1'], row['k2'], row['k3'], row['k4'], row['k5'], row['k6'], row['k7'], row['k8'],
row['k9'], row['k10'], row['k11'], row['k12'], row['k13'], row['k14'], row['k15']]
y_row = [ float(val) for val in y_type_classes ]
# now generate the numeric arrays X
x_row = keywords_in_tweet
X.append(x_row)
y.append(y_row)
print("Converting data to numpy matrix")
X = np.matrix(X)
y = np.matrix(y)
sigmoider = lambda val: 1 if float(val) >= 0.35 else 0
vsigmoid = np.vectorize(sigmoider)
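# Quick illustration of the thresholding (values are made up): with the 0.35
# cut-off above,
#     vsigmoid(np.matrix([[0.2, 0.9], [0.4, 0.0]]))  ->  [[0, 1], [1, 0]]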
print("Training...")
classifier = MLPClassifier()
score = cross_val_score(classifier, X, vsigmoid(y), scoring='f1_samples')
print(np.mean(score))
| apache-2.0 |
dreadjesus/MachineLearning | LogisticRegression/Exp_logistigregression.py | 1 | 1166 | '''
Logistic regression
1 dependent variable (binary), 2+ independent variable(s) (interval or ratio or dichotomous)
'''
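# Background sketch: logistic regression models the probability of the binary
# outcome as
#     P(y = 1 | x) = 1 / (1 + exp(-(b0 + b1*x1 + ... + bk*xk)))
# sklearn exposes the fitted coefficients as LogisticRegression.coef_ and
# LogisticRegression.intercept_.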
import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
# get data
ad_data = pd.read_csv('D:/Github_python_ML/LogisticRegression/advertising.csv')
# Split data and only use numeric data
X = ad_data[['Daily Time Spent on Site', 'Age', 'Area Income',
'Daily Internet Usage', 'Male']]
y = ad_data['Clicked on Ad']
# Split to test and train data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=101)
# make the ML object and train/fit it
from sklearn.linear_model import LogisticRegression
LogisticR = LogisticRegression()
LogisticR.fit(X_train, y_train)
# Predict from test data
predictions = LogisticR.predict(X_test)
# Print the report
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_pred=predictions, y_true=y_test))
print(confusion_matrix(y_pred=predictions, y_true=y_test))
print('[True NO, False NO]')
print('[False YES, True YES]')
| mit |
jrbourbeau/cr-composition | models/save_composition_classifier.py | 1 | 3617 | #!/usr/bin/env python
from __future__ import division, print_function
import os
import argparse
import sklearn
from sklearn.externals import joblib
import warnings
import comptools as comp
warnings.filterwarnings("ignore", category=DeprecationWarning, module="sklearn")
if __name__ == '__main__':
description = 'Saves trained composition classification model for later use'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--config', dest='config',
choices=comp.simfunctions.get_sim_configs(),
help='Detector configuration')
parser.add_argument('--num_groups', dest='num_groups', type=int,
default=4, choices=[2, 3, 4],
help='Number of composition groups')
parser.add_argument('--pipeline', dest='pipeline',
default='BDT',
help='Composition classification pipeline to use')
parser.add_argument('--gridsearch', dest='gridsearch',
action='store_true',
default=False,
help=('Perform a grid search to find optimal '
'hyperparameter values.'))
parser.add_argument('--n_jobs', dest='n_jobs', type=int,
default=1, choices=list(range(1, 21)),
help='Number of jobs to run in parallel for the '
'gridsearch. Ignored if gridsearch=False.')
args = parser.parse_args()
config = args.config
num_groups = args.num_groups
comp_list = comp.get_comp_list(num_groups=num_groups)
# Load training data and fit model
df_sim_train, df_sim_test = comp.load_sim(config=config,
energy_reco=False,
log_energy_min=None,
log_energy_max=None,
test_size=0.5)
feature_list, feature_labels = comp.get_training_features()
X_train = df_sim_train[feature_list].values
y_train = df_sim_train['comp_target_{}'.format(num_groups)].values
# Load untrained model
pipeline_str = '{}_comp_{}_{}-groups'.format(args.pipeline, config, num_groups)
pipeline = comp.get_pipeline(pipeline_str)
if args.gridsearch:
param_grid = comp.get_param_grid(pipeline_name=pipeline_str)
pipeline = comp.gridsearch_optimize(pipeline=pipeline,
param_grid=param_grid,
X_train=X_train,
y_train=y_train,
scoring='accuracy')
else:
pipeline.fit(X_train, y_train)
# Construct dictionary containing fitted pipeline along with metadata
# For information on why this metadata is needed see:
# http://scikit-learn.org/stable/modules/model_persistence.html#security-maintainability-limitations
model_dict = {'pipeline': pipeline,
# Using tuple because items must be pickle-able
'training_features': tuple(feature_list),
'sklearn_version': sklearn.__version__,
'save_pipeline_code': os.path.realpath(__file__)}
outfile = os.path.join(comp.paths.comp_data_dir,
config,
'models',
'{}.pkl'.format(pipeline_str))
comp.check_output_dir(outfile)
joblib.dump(model_dict, outfile)
| mit |
yaojingwu1992/XlsxWriter | examples/pandas_datetime.py | 9 | 1758 | ##############################################################################
#
# An example of converting a Pandas dataframe with datetimes to an xlsx file
# with a default datetime and date format using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
from datetime import datetime, date
# Create a Pandas dataframe from some datetime data.
df = pd.DataFrame({'Date and time': [datetime(2015, 1, 1, 11, 30, 55),
datetime(2015, 1, 2, 1, 20, 33),
datetime(2015, 1, 3, 11, 10 ),
datetime(2015, 1, 4, 16, 45, 35),
datetime(2015, 1, 5, 12, 10, 15)],
'Dates only': [date(2015, 2, 1),
date(2015, 2, 2),
date(2015, 2, 3),
date(2015, 2, 4),
date(2015, 2, 5)],
})
# Create a Pandas Excel writer using XlsxWriter as the engine.
# Also set the default datetime and date formats.
writer = pd.ExcelWriter("pandas_datetime.xlsx",
engine='xlsxwriter',
datetime_format='mmm d yyyy hh:mm:ss',
date_format='mmmm dd yyyy')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Get the xlsxwriter workbook and worksheet objects in order to set the column
# widths, to make the dates clearer.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('B:C', 20)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
WormLabCaltech/Angeles_Leighton_2016 | src/brood_size_analysis.py | 2 | 1196 | # -*- coding: utf-8 -*-
"""
A script to analyze RNAi brood size assays.
author: David Angeles
contact: [email protected]
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
input_path = '../input/rnai_screen_results/'
df = pd.read_csv(input_path + 'brood_assay_screen.csv')
names = pd.read_csv(input_path + 'rnai_genes_dict.csv')
# Drop all useless columns that excel makes
# also drop any and all entries that have NaNs
names.drop('Unnamed: 4', 1, inplace=True)
names.dropna(0, 'any', inplace=True)
# make all codes upper or lower, not both
names.code = names.code.apply(str.lower)
df.rnai = df.rnai.apply(str.lower)
# extract the names that have been assayed so far
translate = lambda x: names[names.code == x].gene_name.values[0]
df['gene'] = df.rnai.apply(translate)
#fill all NANs with the mean value for that column
df.fillna(df.mean(), inplace=True)
df['total'] = df.d1 + df.d2 + df.d3
df.boxplot('total', by='rnai')
plt.show()
df.boxplot('total', by='gene')
plt.show()
sns.swarmplot(x='gene', y='total', data=df, size=10)
plt.show()
sns.swarmplot(x='rnai', y='total', data=df, size=10)
plt.show()
# trim outliers
| mit |
shankari/e-mission-server | emission/analysis/section_features.py | 2 | 16565 | from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
from past.utils import old_div
import math
import logging
import numpy as np
import utm
from sklearn.cluster import DBSCAN
# Our imports
from emission.core.get_database import get_section_db, get_mode_db, get_routeCluster_db,get_transit_db
from emission.core.common import calDistance, Include_place_2
from emission.analysis.modelling.tour_model.trajectory_matching.route_matching import getRoute,fullMatchDistance,matchTransitRoutes,matchTransitStops
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.modeprediction as ecwm
import emission.storage.decorations.trip_queries as esdt
from uuid import UUID
Sections = get_section_db()
Modes = get_mode_db()
# The speed is in m/s
def calOverallSectionSpeed(section):
distanceDelta = section.distance
timeDelta = section.duration
if timeDelta != 0:
retVal = distanceDelta / timeDelta
else:
retVal = None
logging.debug("while calculating overall section speed distanceDelta = %s, timeDelta = %s, result = %s" %
(distanceDelta, timeDelta, retVal))
return retVal
def calSpeed(point1, point2):
from dateutil import parser
distanceDelta = calDistance(point1['data']['loc']['coordinates'],
point2['data']['loc']['coordinates'])
timeDelta = point2['data']['ts'] - point1['data']['ts']
# logging.debug("while calculating speed form %s -> %s, distanceDelta = %s, timeDelta = %s" %
# (trackpoint1, trackpoint2, distanceDelta, timeDelta))
if timeDelta != 0:
    # 'ts' is a numeric (epoch) timestamp, so timeDelta is already in seconds
    return old_div(distanceDelta, timeDelta)
else:
return None
# This formula is from:
# http://www.movable-type.co.uk/scripts/latlong.html
# It returns the heading between two points using
def calHeading(point1, point2):
# points are in GeoJSON format, ie (lng, lat)
phi1 = math.radians(point1[1])
phi2 = math.radians(point2[1])
lambda1 = math.radians(point1[0])
lambda2 = math.radians(point2[0])
y = math.sin(lambda2-lambda1) * math.cos(phi2)
x = math.cos(phi1)*math.sin(phi2) - \
math.sin(phi1)*math.cos(phi2)*math.cos(lambda2-lambda1)
brng = math.degrees(math.atan2(y, x))
return brng
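# Sanity-check examples (coordinates in GeoJSON (lng, lat) order):
#   calHeading([0, 0], [0, 1]) -> ~0 degrees (due north)
#   calHeading([0, 0], [1, 0]) -> ~90 degrees (due east)
# atan2 keeps the result in (-180, 180], so westward headings come out negative.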
def calHC(point1, point2, point3):
HC = calHeading(point2, point3) - calHeading(point1, point2)
return HC
def calHCR(section_entry):
section = section_entry.data
ts = esta.TimeSeries.get_time_series(section_entry.user_id)
tq = esda.get_time_query_for_trip_like_object(section)
locations = list(ts.find_entries(["analysis/recreated_location"], tq))
if len(locations) < 3:
return 0
HCNum = 0
for (i, point) in enumerate(locations[:-2]):
currPoint = point
nextPoint = locations[i+1]
nexNextPt = locations[i+2]
HC = calHC(currPoint['data']['loc']['coordinates'], nextPoint['data']['loc']['coordinates'], \
nexNextPt['data']['loc']['coordinates'])
if HC >= 15:
HCNum += 1
sectionDist = section.distance
if sectionDist!= None and sectionDist != 0:
HCR = HCNum/sectionDist
return HCR
else:
return 0
def calSR(section):
if 'speeds' not in section.data:
return 0
speeds = section.data["speeds"]
if len(speeds) < 2:
return 0
else:
stopNum = 0
for (i, speed) in enumerate(speeds[:-1]):
currVelocity = speed
if currVelocity != None and currVelocity <= 0.75:
stopNum += 1
sectionDist = section.data.distance
if sectionDist != None and sectionDist != 0:
return stopNum/sectionDist
else:
return 0
def calVCR(section_entry):
section = section_entry.data
speeds = section['speeds']
if len(speeds) < 3:
return 0
else:
Pv = 0
for (i, speed) in enumerate(speeds[:-1]):
velocity1 = speed
velocity2 = speeds[i+1]
if velocity1 != None and velocity2 != None:
if velocity1 != 0:
VC = abs(velocity2 - velocity1)/velocity1
else:
VC = 0
else:
VC = 0
if VC > 0.7:
Pv += 1
sectionDist = section.distance
if sectionDist != None and sectionDist != 0:
return Pv/sectionDist
else:
return 0
def calSpeeds(section):
try:
return section["speeds"]
except KeyError:
return None
# In order to calculate the acceleration, we do the following.
# point0: (loc0, t0), point1: (loc1, t1), point2: (loc2, t2), point3: (loc3, t3)
# becomes
# speed0: ((loc1 - loc0) / (t1 - t0)), speed1: ((loc2 - loc1) / (t2-t1)),
# speed2: ((loc3 - loc2) / (t3 - t2)
# becomes
# accel0: speed0 / (t1 - t0), accel1: (speed1 - speed0)/(t2-t1),
# accel2: (speed2 - speed1) / (t3-t2)
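# Worked example (illustrative numbers): with the 30 s resampling assumed below
# and speeds = [2.0, 3.5, 3.5], calAccels returns [2.0/30, 1.5/30, 0.0] m/s^2,
# because the speed before the first sample is taken to be 0.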
def calAccels(section):
speeds = calSpeeds(section)
if speeds is None or len(speeds) == 0:
return None
accel = np.zeros(len(speeds))
prevSpeed = 0
for (i, speed) in enumerate(speeds):
currSpeed = speed # speed0
speedDelta = currSpeed - prevSpeed # (speed0 - 0)
# since we are working with cleaned sections, that have resampled data,
# we know that the difference between the timestamps is 30 sec
# and we don't need to query to determine what it actually is
# if we ever revisit the resampling, we need to fix this again
timeDelta = 30
logging.debug("while calculating accels at index %d, speedDelta = %s, timeDelta = %s" %
(i, speedDelta, timeDelta))
if timeDelta != 0:
accel[i] = speedDelta/timeDelta
logging.debug("resulting acceleration is %s" % accel[i])
# else: remains at zero
prevSpeed = currSpeed
return accel
def getIthMaxSpeed(section, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
# which i they used), we just return the max.
assert(i == 1)
speeds = calSpeeds(section)
return np.amax(speeds)
def getIthMaxAccel(section, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
# which i they used), we just return the max.
assert(i == 1)
accels = calAccels(section)
return np.amax(accels)
def calSpeedDistParams(speeds):
return (np.mean(speeds), np.std(speeds))
# def user_tran_mat(user):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.count_documents(query))
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.count_documents(query)>=2:
# tran_mat=np.zeros([Modes.estimated_document_count(), Modes.estimated_document_count()])
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
# else:
# return None
#
# # all model
# def all_tran_mat():
# tran_mat=np.zeros([Modes.estimated_document_count(), Modes.estimated_document_count()])
# for user in Sections.distinct("user_id"):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.count_documents(query))
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.count_documents(query)>=2:
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
def mode_cluster(mode,eps,sam):
mode_change_pnts=[]
query = {'confirmed_mode':mode}
logging.debug("Trying to find cluster locations for %s trips" % (Sections.count_documents(query)))
for section in Sections.find(query).sort("section_start_datetime",1):
try:
mode_change_pnts.append(section['section_start_point']['coordinates'])
mode_change_pnts.append(section['section_end_point']['coordinates'])
except:
logging.warning("Found trip %s with missing start and/or end points" % (section['_id']))
pass
if len(mode_change_pnts) == 0:
logging.debug("No points found in cluster input, nothing to fit..")
return np.zeros(0)
if len(mode_change_pnts)>=1:
# print(mode_change_pnts)
np_points=np.array(mode_change_pnts)
# print(np_points[:,0])
# fig, axes = plt.subplots(1, 1)
# axes.scatter(np_points[:,0], np_points[:,1])
# plt.show()
else:
pass
utm_x = []
utm_y = []
for row in mode_change_pnts:
# GEOJSON order is lng, lat
utm_loc = utm.from_latlon(row[1],row[0])
utm_x = np.append(utm_x,utm_loc[0])
utm_y = np.append(utm_y,utm_loc[1])
utm_location = np.column_stack((utm_x,utm_y))
db = DBSCAN(eps=eps,min_samples=sam)
db_fit = db.fit(utm_location)
db_labels = db_fit.labels_
#print db_labels
new_db_labels = db_labels[db_labels!=-1]
new_location = np_points[db_labels!=-1]
# print len(new_db_labels)
# print len(new_location)
# print new_information
label_unique = np.unique(new_db_labels)
cluster_center = np.zeros((len(label_unique),2))
for label in label_unique:
sub_location = new_location[new_db_labels==label]
temp_center = np.mean(sub_location,axis=0)
cluster_center[int(label)] = temp_center
# print cluster_center
return cluster_center
#
# print(mode_cluster(6))
def mode_start_end_coverage(section,cluster,eps):
mode_change_pnts=[]
# print(tran_mat)
num_sec=0
centers=cluster
# print(centers)
try:
if Include_place_2(centers,section['section_start_point']['coordinates'],eps) and \
Include_place_2(centers,section['section_end_point']['coordinates'],eps):
return 1
else:
return 0
except:
return 0
# print(mode_start_end_coverage(5,105,2))
# print(mode_start_end_coverage(6,600,2))
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
# NOTE: this definition takes a list of section documents, but it is shadowed
# by the second get_mode_share_by_count() defined below, which takes a list of
# section ids; only the second definition is reachable at runtime.
def get_mode_share_by_count(lst):
# input here is a list of sections
displayModeList = getDisplayModes()
# logging.debug(displayModeList)
modeCountMap = {}
for mode in displayModeList:
modeCountMap[mode['mode_name']] = 0
for section in lst:
if section['confirmed_mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
elif section['mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
return modeCountMap
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
def get_mode_share_by_count(list_idx):
Sections=get_section_db()
## takes a list of idx's
AllModeList = getAllModes()
MODE = {}
MODE2= {}
for mode in AllModeList:
MODE[mode['mode_id']]=0
for _id in list_idx:
section=Sections.find_one({'_id': _id})
mode_id = section['confirmed_mode']
try:
MODE[mode_id] += 1
except KeyError:
MODE[mode_id] = 1
# print(sum(MODE.values()))
if sum(MODE.values())==0:
for mode in AllModeList:
MODE2[mode['mode_id']]=0
# print(MODE2)
else:
for mode in AllModeList:
MODE2[mode['mode_id']]=old_div(MODE[mode['mode_id']],sum(MODE.values()))
return MODE2
def cluster_route_match_score(section,step1=100000,step2=100000,method='lcs',radius1=2000,threshold=0.5):
userRouteClusters=get_routeCluster_db().find_one({'$and':[{'user':section['user_id']},{'method':method}]})['clusters']
route_seg = getRoute(section['_id'])
dis=999999
medoid_ids=list(userRouteClusters.keys())
if len(medoid_ids)!=0:
choice=medoid_ids[0]
for idx in list(userRouteClusters.keys()):
route_idx=getRoute(idx)
try:
dis_new=fullMatchDistance(route_seg,route_idx,step1,step2,method,radius1)
except RuntimeError:
dis_new=999999
if dis_new<dis:
dis=dis_new
choice=idx
# print(dis)
# print(userRouteClusters[choice])
if dis<=threshold:
cluster=userRouteClusters[choice]
cluster.append(choice)
ModePerc=get_mode_share_by_count(cluster)
else:
ModePerc=get_mode_share_by_count([])
return ModePerc
def transit_route_match_score(section,step1=100000,step2=100000,method='lcs',radius1=2500,threshold=0.5):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(section['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitRoutes(route_seg,entry['stops'],step1,step2,method,radius1,threshold)
if transitMatch[entry['type']]==1:
break
return transitMatch
def transit_stop_match_score(section,radius1=300):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(section['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitStops(route_seg,entry['stops'],radius1)
if transitMatch[entry['type']]==1:
break
return transitMatch
def select_inferred_mode(prediction_list):
# We currently only support a single prediction
assert(len(prediction_list) == 1)
curr_prediction = prediction_list[0]
assert(curr_prediction.algorithm_id == ecwm.AlgorithmTypes.SEED_RANDOM_FOREST)
prediction_map = curr_prediction["predicted_mode_map"]
max_value = max(prediction_map.values())
logging.debug("max confidence in prediction map = %s" % max_value)
keys_for_max_value = [k for (k, v) in prediction_map.items() if v == max_value]
logging.debug("max keys in prediction map = %s" % keys_for_max_value)
if len(keys_for_max_value) == 1:
return keys_for_max_value[0]
else:
classes_for_max_value = [ecwm.PredictedModeTypes[key].value for key in keys_for_max_value]
logging.debug("classes for max_value = %s" % classes_for_max_value)
min_class = min(classes_for_max_value)
logging.debug("min_class = %s" % min_class)
return ecwm.PredictedModeTypes(min_class).name
| bsd-3-clause |
brainstorm/bcbio-nextgen-vm | setup.py | 2 | 2841 | #!/usr/bin/env python
import os
import shutil
import sys
from setuptools import setup, find_packages
version = "0.1.0a"
def write_version_py():
version_py = os.path.join(os.path.dirname(__file__), "bcbiovm", "version.py")
try:
import subprocess
p = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
stdout=subprocess.PIPE)
githash = p.stdout.read().strip()
except:
githash = ""
with open(version_py, "w") as out_handle:
out_handle.write("\n".join(['__version__ = "%s"' % version,
'__git_revision__ = "%s"' % githash]))
write_version_py()
if "--record=/dev/null" in sys.argv: # conda build
install_requires = []
else:
install_requires = [
"matplotlib", "pandas", "paramiko", "six", "PyYAML",
"pythonpy", "bcbio-nextgen"]
setup(name="bcbio-nextgen-vm",
version=version,
author="Brad Chapman and bcbio-nextgen contributors",
description="Run bcbio-nextgen genomic sequencing analyses using isolated containers and virtual machines",
license="MIT",
url="https://github.com/chapmanb/bcbio-nextgen-vm",
packages=find_packages(),
scripts=["scripts/bcbio_vm.py"],
install_requires=install_requires)
def ansible_pb_files(ansible_pb_dir):
"""Retrieve ansible files for installation. Derived from elasticluster setup.
"""
ansible_data = []
for (dirname, dirnames, filenames) in os.walk(ansible_pb_dir):
tmp = []
for fname in filenames:
if fname.startswith(".git"): continue
tmp.append(os.path.join(dirname, fname))
ansible_data.append((os.path.join("share", "bcbio-vm", dirname), tmp))
return ansible_data
def elasticluster_config_files(base_dir):
"""Retrieve example elasticluster config files for installation.
"""
return [(os.path.join("share", "bcbio-vm", base_dir),
[os.path.join(base_dir, x) for x in os.listdir(base_dir)])]
if __name__ == "__main__":
"""Install ansible playbooks and other associated data files.
"""
if sys.argv[1] in ["develop", "install"]:
for dirname, fnames in ansible_pb_files("ansible") + elasticluster_config_files("elasticluster"):
dirname = os.path.join(os.path.abspath(sys.prefix), dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
for fname in fnames:
if sys.argv[1] == "develop":
link_path = os.path.join(dirname, os.path.basename(fname))
if not os.path.exists(link_path):
link_target = os.path.join(os.getcwd(), fname)
os.symlink(link_target, link_path)
else:
shutil.copy(fname, dirname)
| mit |
aleksandar-mitrevski/fault-management | graph_fault_analysis/graph_analysis.py | 1 | 11500 | import networkx as nx
import matplotlib.pyplot as plt
import sympy
class GraphAnalyser(object):
'''Defines a library for creating and analysing graphs that represent system constraints and variables.
    The class is based on M. Blanke, M. Kinnaert, J. Lunze and M. Staroswiecki, Diagnosis and Fault-Tolerant Control. Germany: Springer-Verlag Berlin Heidelberg, 2006.
Author -- Aleksandar Mitrevski
'''
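    # A minimal usage sketch, with hypothetical variable/constraint names, kept
    # as comments:
    #
    #     x1, x2 = sympy.symbols('x1 x2')                 # unknown variables
    #     u = sympy.Symbol('u')                           # known variable
    #     c1, c2 = sympy.symbols('c1 c2')                 # constraints
    #     analyser = GraphAnalyser([x1, x2], [u], {c1: [x1, u], c2: [x1, x2]})
    #     matching = analyser.find_matching()
    #     analyser.draw_matched_graph(matching)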
def __init__(self, unknown_vars, known_vars, constraints):
'''
Keyword arguments:
unknown_vars -- A list of unknown system variables.
known_vars -- A list of known system variables.
constraints -- A dictionary where the keys represent constraints and the values represent list of variables associated with the appropriate constraints.
'''
self.__unknown_vars = list(unknown_vars)
self.__known_vars = list(known_vars)
self.__constraints = dict(constraints)
#dictionary used for better node organisation in visualisations
self.__node_positions = dict()
self.__graph = self.__create_graph(unknown_vars, known_vars, constraints)
def __create_graph(self, unknown_vars, known_vars, constraints):
'''Creates a bipartite graph connecting system variables and constraints.
Keyword arguments:
unknown_vars -- A list of unknown system variables.
known_vars -- A list of known system variables.
constraints -- A dictionary where the keys represent constraints and the values represent list of variables associated with the appropriate constraints.
Returns:
graph -- A 'networkx.Graph' object.
'''
graph = nx.Graph()
for i in xrange(len(unknown_vars)):
graph.add_node(unknown_vars[i])
self.__node_positions[unknown_vars[i]] = (len(self.__node_positions.keys())*10., 20.)
for i in xrange(len(known_vars)):
graph.add_node(known_vars[i])
self.__node_positions[known_vars[i]] = (len(self.__node_positions.keys())*10., 20.)
#used for node organisation in visualisations of the graph
constraint_counter = 0
for constraint, variables in constraints.iteritems():
graph.add_node(constraint)
self.__node_positions[constraint] = (constraint_counter*10., 0.)
constraint_counter = constraint_counter + 1
for j in xrange(len(variables)):
graph.add_edge(constraint, variables[j])
return graph
def draw_graph(self, edge_colours=None):
'''Draws 'self.__graph'. The nodes are coloured as in http://stackoverflow.com/questions/13517614/draw-different-color-for-nodes-in-networkx-based-on-their-node-value?rq=1
Keyword arguments:
edge_colours -- If it is equal to None, the variable is ignored; if not, it is expected to be a list of colours, such that the i-th element corresponds to the i-th edge in 'self.__graph.edges()'.
'''
node_colours = list()
for node in self.__graph.nodes():
if node in self.__unknown_vars:
node_colours.append('r')
elif node in self.__known_vars:
node_colours.append('g')
else:
node_colours.append('b')
if edge_colours != None:
nx.draw(self.__graph, pos=self.__node_positions, node_color=node_colours, edge_color=edge_colours)
else:
nx.draw(self.__graph, pos=self.__node_positions, node_color=node_colours)
def draw_matched_graph(self, matching):
'''Draws 'self.__graph' using a previously found matching.
Keyword arguments:
matching -- A dictionary where the keys represent constraints and the values represent variables matched with the constraints.
'''
matching_edges = self.get_matching_edges(matching)
edge_colours = list()
for _,edge in enumerate(self.__graph.edges()):
if edge in matching_edges or edge[::-1] in matching_edges:
edge_colours.append('r')
else:
edge_colours.append('k')
self.draw_graph(edge_colours)
def draw_directed_graph(self, matching):
'''Uses a matching for finding a directed version of 'self.__graph'; draws the directed graph after constructing it.
Keyword arguments:
matching -- A dictionary where the keys represent constraints and the values represent variables matched with the constraints.
'''
directed_graph = self.__create_matching_graph(matching)
zero_var = sympy.Symbol('zero')
node_colours = list()
for node in directed_graph.nodes():
if node in self.__unknown_vars:
node_colours.append('r')
elif node in self.__known_vars or node == zero_var:
node_colours.append('g')
else:
node_colours.append('b')
node_positions = dict(self.__node_positions)
node_positions[zero_var] = ((len(self.__unknown_vars) + len(self.__known_vars))*10., 20.)
nx.draw(directed_graph, pos=node_positions, node_color=node_colours)
def get_nodes(self):
return self.__graph.nodes()
def get_edges(self):
return self.__graph.edges()
def get_matching_edges(self, matching):
'''Converts the input dictionary into a list of tuples and returns the list.
Keyword arguments:
matching -- A dictionary where the keys represent constraints and the values represent variables matched with the constraints.
Returns:
matching_edges -- A list of tuples; each tuple represents an edge that belongs to a matching.
'''
matching_edges = list()
for key,value in matching.iteritems():
matching_edges.append((key,value))
return matching_edges
def get_adjacency_matrix(self):
'''Finds a reduced version of the adjacency matrix representing 'self.__graph',
such that the rows represent constraints and the columns represent variables.
Returns:
reduced_matrix -- The reduced adjacency matrix.
ordered_constraints -- A list of constraints represented by the graph, such that the i-th constraint represents the i-th row of 'reduced_matrix'.
        ordered_variables -- A list of variables represented by the graph, such that the i-th variable represents the i-th column of 'reduced_matrix'.
'''
matrix = nx.adjacency_matrix(self.__graph).tolist()
nodes = self.__graph.nodes()
ordered_constraints = []
ordered_variables = []
for i,node in enumerate(nodes):
if node in self.__constraints:
ordered_constraints.append(node)
else:
ordered_variables.append(node)
reduced_matrix = []
for i,node in enumerate(nodes):
if node in self.__constraints:
adjacency_row = list(matrix[i])
j = len(nodes)-1
while j >= 0:
if nodes[j] in self.__constraints:
del adjacency_row[j]
j = j - 1
reduced_matrix.append(adjacency_row)
return reduced_matrix, ordered_constraints, ordered_variables
def find_matching(self, banned_edges=[]):
'''Finds a matching in 'self.__graph'.
The matching algorithm uses the ranking algorithm described in Blanke et al., p. 142.
Keyword arguments:
        banned_edges -- A list of tuples; each tuple represents an edge that cannot participate in a matching because it would cause integral causality.
Returns:
matching -- A dictionary where the keys represent constraints and the values represent variables matched with the constraints.
'''
rank = 0
matching = dict()
ranking = dict()
zero_var = sympy.Symbol('zero')
for _,variable in enumerate(self.__known_vars):
ranking[variable] = rank
adj_matrix, ordered_constraints, ordered_variables = self.get_adjacency_matrix()
#we remove the banned edges from the adjacency matrix
for i,constraint in enumerate(ordered_constraints):
for j,variable in enumerate(ordered_variables):
if adj_matrix[i][j] > 0. and (constraint,variable) in banned_edges:
adj_matrix[i][j] = 0.
continue_ranking = True
while continue_ranking:
for i,constraint in enumerate(ordered_constraints):
unmatched_vars_counter = 0
unmatched_var = None
for j,variable in enumerate(ordered_variables):
if adj_matrix[i][j] > 0. and variable not in ranking.keys():
unmatched_vars_counter = unmatched_vars_counter + 1
unmatched_var = variable
if unmatched_vars_counter == 1:
ranking[unmatched_var] = rank
ranking[constraint] = rank
matching[constraint] = unmatched_var
for i,constraint in enumerate(ordered_constraints):
if constraint not in ranking.keys():
unmatched_vars_counter = 0
unmatched_var = None
for j,variable in enumerate(ordered_variables):
if adj_matrix[i][j] > 0. and variable not in ranking.keys():
unmatched_vars_counter = unmatched_vars_counter + 1
unmatched_var = variable
if unmatched_vars_counter == 0:
ranking[constraint] = rank
matching[constraint] = zero_var
all_variables_ranked = True
for _,variable in enumerate(ordered_variables):
if variable not in ranking.keys():
all_variables_ranked = False
break
all_constraints_ranked = True
for _,constraints in enumerate(ordered_constraints):
if constraints not in ranking.keys():
all_constraints_ranked = False
break
if all_constraints_ranked or all_variables_ranked:
continue_ranking = False
else:
rank = rank + 1
return matching
def __create_matching_graph(self, matching):
'''Finds a directed version of 'self.__graph' using the input matching; the conversion of the graph is based on Blanke et al., p. 125.
Returns:
        graph -- A 'networkx.DiGraph' object representing the graph's directed version.
'''
graph = nx.DiGraph()
matching_edges = self.get_matching_edges(matching)
for _,edge in enumerate(self.__graph.edges()):
if edge in matching_edges:
graph.add_edge(edge[0], edge[1])
elif edge[::-1] in matching_edges:
graph.add_edge(edge[1], edge[0])
else:
if edge[0] in self.__constraints:
graph.add_edge(edge[1], edge[0])
else:
graph.add_edge(edge[0], edge[1])
zero_var = sympy.Symbol('zero')
for _,edge in enumerate(matching_edges):
if zero_var == edge[1]:
graph.add_edge(edge[0], edge[1])
return graph | mit |
byu-dml/metalearn | metalearn/metafeatures/landmarking_metafeatures.py | 1 | 5474 | import warnings
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_validate, StratifiedKFold
from sklearn.metrics import make_scorer, accuracy_score, cohen_kappa_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from metalearn.metafeatures.base import build_resources_info, MetafeatureComputer
from metalearn.metafeatures.constants import MetafeatureGroup, ProblemType
'''
Compute Landmarking meta-features according to Reif et al. 2012.
The accuracy values of the following simple learners are used:
Naive Bayes, Linear Discriminant Analysis, One-Nearest Neighbor,
Decision Node, Random Node.
'''
def run_pipeline(X, Y, pipeline, n_folds, cv_seed):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning) # suppress sklearn warnings
warnings.filterwarnings("ignore", category=UserWarning) # suppress sklearn warnings
accuracy_scorer = make_scorer(accuracy_score)
kappa_scorer = make_scorer(cohen_kappa_score)
cv = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=cv_seed)
scores = cross_validate(
pipeline, X.values, Y.values, cv=cv, n_jobs=1, scoring={
'accuracy': accuracy_scorer, 'kappa': kappa_scorer
}
)
err_rate = 1. - np.mean(scores['test_accuracy'])
kappa = np.mean(scores['test_kappa'])
return (err_rate, kappa)
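# Rough usage sketch (X and Y are the pandas objects supplied by the
# MetafeatureComputer resource wiring below; fold/seed values are arbitrary):
#
#     err_rate, kappa = run_pipeline(
#         X, Y, Pipeline([('naive_bayes', GaussianNB())]), n_folds=2, cv_seed=0)
#
# Each landmarker below differs only in the estimator placed in the pipeline.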
def get_naive_bayes(X, Y, n_folds, cv_seed):
pipeline = Pipeline([('naive_bayes', GaussianNB())])
return run_pipeline(X, Y, pipeline, n_folds, cv_seed)
get_naive_bayes = MetafeatureComputer(
get_naive_bayes,
[
"NaiveBayesErrRate",
"NaiveBayesKappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample"
}
)
def get_knn_1(X, Y, n_folds, cv_seed):
pipeline = Pipeline([(
'knn_1', KNeighborsClassifier(n_neighbors = 1, n_jobs=1)
)])
return run_pipeline(X, Y, pipeline, n_folds, cv_seed)
get_knn_1 = MetafeatureComputer(
get_knn_1,
[
"kNN1NErrRate",
"kNN1NKappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample"
}
)
def get_decision_stump(X, Y, seed, n_folds, cv_seed):
pipeline = Pipeline([(
'decision_stump', DecisionTreeClassifier(
criterion='entropy', splitter='best', max_depth=1, random_state=seed
)
)])
return run_pipeline(X, Y, pipeline, n_folds, cv_seed)
get_decision_stump = MetafeatureComputer(
get_decision_stump,
[
"DecisionStumpErrRate",
"DecisionStumpKappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample",
"seed": 5
}
)
def get_random_tree(X, Y, depth, seed, n_folds, cv_seed):
pipeline = Pipeline([(
'random_tree', DecisionTreeClassifier(
criterion='entropy', splitter='random', max_depth=depth,
random_state=seed
)
)])
return run_pipeline(X, Y, pipeline, n_folds, cv_seed)
get_random_tree_depth_1 = MetafeatureComputer(
get_random_tree,
[
"RandomTreeDepth1ErrRate",
"RandomTreeDepth1Kappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample",
"depth": 1,
"seed": 6
}
)
get_random_tree_depth_2 = MetafeatureComputer(
get_random_tree,
[
"RandomTreeDepth2ErrRate",
"RandomTreeDepth2Kappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample",
"depth": 2,
"seed": 7
}
)
get_random_tree_depth_3 = MetafeatureComputer(
get_random_tree,
[
"RandomTreeDepth3ErrRate",
"RandomTreeDepth3Kappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample",
"depth": 3,
"seed": 8
}
)
def get_lda(X, Y, n_folds, cv_seed):
pipeline = Pipeline([(
'lda', LinearDiscriminantAnalysis()
)])
return run_pipeline(X, Y, pipeline, n_folds, cv_seed)
get_lda = MetafeatureComputer(
get_lda,
[
"LinearDiscriminantAnalysisErrRate",
"LinearDiscriminantAnalysisKappa"
],
ProblemType.CLASSIFICATION,
[
MetafeatureGroup.LANDMARKING,
MetafeatureGroup.TARGET_DEPENDENT
],
{
"X": "XPreprocessed",
"Y": "YSample"
}
)
"""
A list of all MetafeatureComputer
instances in this module.
"""
metafeatures_info = build_resources_info(
get_naive_bayes,
get_knn_1,
get_decision_stump,
get_random_tree_depth_1,
get_random_tree_depth_2,
get_random_tree_depth_3,
get_lda
)
| mit |
akionakamura/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
ephes/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
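# Minimal usage sketch (assumes labels come from some clusterer, e.g. KMeans):
#
#     from sklearn.cluster import KMeans
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     score = silhouette_score(X, labels, metric='euclidean')
#
# Scores near +1 indicate dense, well-separated clusters; scores near 0
# indicate overlapping clusters.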
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
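# Worked example (added for clarity): if a sample has mean intra-cluster
# distance a = 1.0 and mean nearest-cluster distance b = 3.0, its coefficient
# is (b - a) / max(a, b) = (3.0 - 1.0) / 3.0, i.e. about 0.67, meaning it
# sits well inside its own cluster.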
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
        Row of the pairwise distance matrix: distances from sample i to each
        sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
        Row of the pairwise distance matrix: distances from sample i to each
        sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
Djabbz/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a projection
of the data similar to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
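# --- Supplementary sketch (added; not part of the original example) --------
# The fits above pass the whole iris array at once; the function below shows
# the out-of-core pattern described in the docstring, feeding IncrementalPCA
# one batch at a time via ``partial_fit``. The helper name and batch size are
# illustrative assumptions.
def _incremental_fit_in_batches(data, n_components=2, batch_size=10):
    ipca_stream = IncrementalPCA(n_components=n_components)
    for batch in np.array_split(data, max(1, len(data) // batch_size)):
        ipca_stream.partial_fit(batch)  # only this batch needs to be in memory
    return ipca_stream.transform(data)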
| bsd-3-clause |
anurag313/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
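# Note (added for clarity): each entry of the returned ``scores_`` is the
# fraction of the ``n_resampling`` randomized fits in which the corresponding
# feature was selected (received a non-zero coefficient), i.e. a stability
# score between 0 and 1.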
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
    Randomized Lasso works by resampling the training data and computing
    a Lasso on each resampling. In short, features selected more often
    across resamplings are considered good features. It is also known as
    stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
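# --- Illustrative usage sketch (added; not part of the original module) ----
# A hedged, minimal example of stability selection with RandomizedLasso on
# synthetic data; the helper name and dataset parameters are assumptions made
# for illustration only.
def _randomized_lasso_demo():
    from sklearn.datasets import make_regression

    X_demo, y_demo = make_regression(n_samples=100, n_features=20,
                                     n_informative=5, random_state=0)
    selector = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    selector.fit(X_demo, y_demo)
    # Boolean mask of features whose stability score exceeds the threshold.
    return selector.get_support()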
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, features
    selected more often across resamplings are considered good features. It
    is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
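# --- Illustrative usage sketch (added; not part of the original module) ----
# A hedged example of computing and plotting a stability path; the helper
# name and the matplotlib usage are assumptions made for illustration only.
def _stability_path_demo():
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_regression

    X_demo, y_demo = make_regression(n_samples=100, n_features=20,
                                     n_informative=5, random_state=0)
    alpha_grid, scores_path = lasso_stability_path(X_demo, y_demo,
                                                   random_state=0,
                                                   n_resampling=50)
    # One curve per feature: stable (informative) features stay near 1 along
    # most of the regularization path.
    plt.plot(alpha_grid, scores_path.T)
    plt.xlabel('alpha / alpha_max')
    plt.ylabel('selection frequency')
    plt.show()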
| bsd-3-clause |
hdmetor/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
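# --- Supplementary sketch (added; not part of the original example) --------
# The reliability curves above show the sigmoid-shaped bias of the raw SVC
# scores; a hedged sketch of correcting it with CalibratedClassifierCV
# (Platt scaling via method='sigmoid'). The variable names and cv=3 are
# arbitrary illustrative choices.
from sklearn.calibration import CalibratedClassifierCV
svc_calibrated = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
svc_calibrated.fit(X_train, y_train)
# Unlike the min-max scaled decision_function used above, these are
# probability estimates produced by the calibration layer.
prob_pos_calibrated = svc_calibrated.predict_proba(X_test)[:, 1]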
| bsd-3-clause |
LeeMendelowitz/DCMetroMetrics | utils/createStationDatabase.py | 2 | 6346 | # Author: Lee Mendelowitz
# Date: 2/19/2013
# Create a database of stations: stations.py
# Write variables to a python module, which can be
# imported
#############################################
import json
from collections import defaultdict
import numpy as np
import sys, os
# Add parent dir to sys path
curfile = os.path.abspath(__file__)
curdir = os.path.split(curfile)[0]
parentdir = os.path.split(curdir)[0]
REPO_DIR = parentdir
sys.path.append(REPO_DIR)
import dcmetrometrics
from dcmetrometrics.eles.WMATA_API import WMATA_API
from dcmetrometrics.keys import WMATA_API_KEY
class StationEscalatorData(object):
def __init__(self, codes, numEscalators, numRiders, weight):
self.numEscalators = numEscalators
self.numRiders = numRiders
self.weight = weight
self.codes = codes
self.escalatorWeight = float(self.weight)/self.numEscalators if self.numEscalators > 0 else 0.0
def makeDict(self):
keys = ['numEscalators', 'numRiders', 'weight', 'codes', 'escalatorWeight']
d = dict((k, self.__dict__[k]) for k in keys)
return d
class Station(object):
def __init__(self, apiDict):
d = apiDict
self.code = d['Code']
self.name = d['Name']
self.shortName = None # Set later
self.lat = d['Lat']
self.lon = d['Lon']
lineCodeKeys = ['LineCode%i'%i for i in (1,2,3,4)]
self.lineCodes = [c for c in (d.get(k, None) for k in lineCodeKeys) if c]
sharedStationKeys = ['StationTogether%i'%i for i in (1,2)]
self.sharedStations = [c for c in (d.get(k, None) for k in sharedStationKeys) if c]
self.allCodes = [self.code] + self.sharedStations
self.allLines = None # Set Later
def addEscalatorData(self, data):
for k,v in data.iteritems():
setattr(self, k, v)
def __getitem__(self, k):
return getattr(self, k, None)
def makeDict(self):
return dict(self.__dict__)
#######################################
# Get the weight of each escalator,
# to compute weighted availability
def getStationEscalatorData():
import os
import pandas
cwd = os.getcwd()
dataFile = os.path.join(REPO_DIR,'data', 'stations.data.csv')
stationData = pandas.read_table(dataFile)
codes = stationData['Codes']
riders = stationData['2012***']
numEsc = stationData['N']
stationsWithEsc = numEsc > 0
stationsWithNoEsc = np.logical_not(stationsWithEsc)
denom = float(riders[stationsWithEsc].sum())
weight = riders/denom
weight[stationsWithNoEsc] = 0.0
stationCodeToData = {}
for myCodes, myWeight, riderCount, escCount in zip(codes, weight, riders, numEsc):
if pandas.isnull(myCodes):
continue
myCodes = myCodes.split(',')
sd = StationEscalatorData(myCodes, escCount, riderCount, myWeight)
for c in myCodes:
stationCodeToData[c] = sd
stationCodeToData = dict((k,v.makeDict()) for k,v in stationCodeToData.items())
return stationCodeToData
###########################
# Read station names and short names from csv file
def getStationNames():
import os
import pandas
cwd = os.getcwd()
dataFile = os.path.join(REPO_DIR,'data', 'station.names.csv')
stationData = pandas.read_table(dataFile, sep=',')
codeToName = dict(zip(stationData['code'], stationData['long_name']))
codeToShortName = dict(zip(stationData['code'], stationData['short_name']))
return (codeToName, codeToShortName)
def defineVariables():
api = WMATA_API(key=WMATA_API_KEY)
allStations = json.loads(api.getStations().text)['Stations']
allStations = [Station(data) for data in allStations]
codeToStationData = dict((s.code, s) for s in allStations)
codeToName, codeToShortName = getStationNames()
# 7/26: Removing Escalator Data because it is missing for new Silver Line Stations
# codeToEscalatorData = getStationEscalatorData()
# assert(set(codeToName.keys()) == set(codeToEscalatorData.keys()) == set(codeToShortName.keys()))
# Adjust the station names and add escalator data
for code in codeToName.iterkeys():
s = codeToStationData[code]
s.name = codeToName[code]
s.shortName = codeToShortName[code]
#escD = codeToEscalatorData[code]
#s.addEscalatorData(escD)
# Set the allLines attribute for each station
codeToAllLines = defaultdict(list)
for s in allStations:
for c in s.allCodes:
codeToAllLines[c].extend(s.lineCodes)
for c in codeToAllLines:
allLines = list(set(codeToAllLines[c]))
s = codeToStationData[c]
s.allLines = allLines
allCodes = [s.code for s in allStations]
nameToCodes = defaultdict(list)
for code,name in codeToName.iteritems():
nameToCodes[name].append(code)
nameToCodes = dict(nameToCodes)
# Compile a list of station codes for each line
lineToCodes = defaultdict(list)
# Get Lines for a station
def getLines(s):
keys = ['LineCode%i'%i for i in range(1,5)]
lines = [l for l in (s[k] for k in keys) if l is not None]
return lines
for s in allStations:
lines = getLines(s)
for l in lines:
lineToCodes[l].append(s['Code'])
lineToCodes = dict(lineToCodes)
# Convert classes to dictionaries before exporting
allStations = [s.makeDict() for s in allStations]
codeToStationData = dict((k, s.makeDict()) for k,s in codeToStationData.iteritems())
res = { 'allStations' : allStations,
'codeToStationData' : codeToStationData,
'codeToName' : codeToName,
'codeToShortName' : codeToShortName,
'nameToCodes': nameToCodes,
'lineToCodes' : lineToCodes,
'allCodes' : allCodes,
#'codeToEscalatorData' : codeToEscalatorData
}
return res
def writeModule(moduleName, varDict):
import pprint
fout = open(moduleName, 'w')
printer = pprint.PrettyPrinter()
# Write variables
for var,item in varDict.items():
fout.write('%s = \\\n%s\n\n'%(var, printer.pformat(item)))
fout.close()
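# Illustration (hypothetical output; the real codes and names come from the
# WMATA API at run time): each variable is written as a plain assignment, e.g.
#   codeToName = \
#   {u'A01': u'Metro Center', ...}
# so downstream code can simply do ``from stations2 import codeToName``.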
def run():
moduleName = 'stations2.py'
varDict = defineVariables()
writeModule(moduleName, varDict)
if __name__ == '__main__':
run()
| gpl-2.0 |
ldirer/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
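# For reference (added; not part of the original fixtures): each line of an
# svmlight/libsvm file has the form "<label> [qid:<int>] <index>:<value> ...",
# e.g. "3 qid:1 1:0.53 2:0.12", which is the format provided by the data
# files above and exercised by the tests below.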
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X) | bsd-3-clause |
semplea/characters-meta | python/opener-compare.py | 1 | 4351 | # coding: utf8
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import division
import sys, glob, os
import getopt
import math
import collections
import matplotlib.pyplot as plt
import re
from scipy.interpolate import spline
from matplotlib.legend_handler import HandlerLine2D
def keyForMaxValue(_dict):
maxK = ''
maxV = 0
for k,v in _dict.iteritems():
if (v>maxV):
maxV = v
maxK = k
return maxK
try:
opts, args = getopt.getopt(sys.argv[1:], "df", ["file="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
sys.exit(2)
bookfile = ''
debug = False
for o, a in opts:
if o in ("-f", "--file"):
bookfile = a
if o in ("-d"):
debug = True
benchmark = {}
with open(bookfile+'.corr') as f:
for i, raw_line in enumerate(f):
line = unicode(raw_line.strip(), 'utf8').split(u"\t")
benchmark[line[0]] = line[1]
finalWordClasses = {'character':[],'place':[]}
openerScores = {}
with open(bookfile+'-opener.txt') as f:
for i, raw_line in enumerate(f):
line = unicode(raw_line.strip(), 'utf8').split(u"\t")
if not line[0] in openerScores:
openerScores[line[0]] = {"place":0,"character":0,"other":0}
if line[1]=="LOCATION":
openerScores[line[0]]["place"] = openerScores[line[0]]["place"]+1
elif line[1]=="PERSON" or line[1]=="ORGANIZATION":
openerScores[line[0]]["character"] = openerScores[line[0]]["character"]+1
else:
openerScores[line[0]]["other"] = openerScores[line[0]]["other"]+1
allpredictions = {}
fulltext = ''
with open (bookfile+'.txt') as f:
data = f.readlines()
fulltext = ''.join(data)  # join the lines read above; fulltext was left empty in the original
allwords = len(re.findall(r'\w+', fulltext))
WORD_FREQUENCE_THRESHOLD = round(6+(allwords/10000)/4)
for p, values in openerScores.iteritems():
kmv = keyForMaxValue(values)
if (sum(values.values())>WORD_FREQUENCE_THRESHOLD):
allpredictions[p] = [[kmv, values[kmv]/sum(values.values())]]
for wp in allpredictions.keys():
for wb in benchmark.keys():
if wb in wp and wb!=wp and not wp in benchmark.keys():
print("WARN: "+wp+" = "+wb+"?")
if (debug):
for p, values in allpredictions.iteritems():
print(p+"\t"+values[0][0]+"\t"+str(values[0][1]))
weights = [1]
ncat = 0
unknown_words = []
correct_predictors = {}
ref_count = {} # reference (number of words that should fall in each category, by predictor; last idx=best choice)
attr_count = {} # attributions (number of words that fell in each category, by predictor; last idx=best choice)
for cat in ['character','place']:
ncat = ncat+1
correct_predictors[cat] = {}
attr_count[cat] = {}
ref_count[cat] = 0
for pred_idx in range(0,len(weights)+1):
correct_predictors[cat][pred_idx] = []
attr_count[cat][pred_idx] = []
for word, word_predictions in allpredictions.iteritems():
if word in benchmark.keys():
if (benchmark[word]==cat): # we only consider the words from this effective category
ref_count[cat] = ref_count[cat]+1
for pred_idx, prediction in enumerate(word_predictions):
correct_predictors[cat][pred_idx].append(1 if (prediction[0]==cat) else 0)
# if (prediction[0]==cat):
# print('OKK: '+word+' ('+cat+')')
# else:
# print('ERR: '+word+' ('+prediction[0]+' instead of '+cat+')')
correct_predictors[cat][pred_idx+1].append(1 if (cat in finalWordClasses and word in finalWordClasses[cat]) else 0)
else:
unknown_words.append(word) # we ignore words that are not listed in the benchmark file
for pred_idx, prediction in enumerate(word_predictions):
attr_count[cat][pred_idx].append(1 if prediction[0]==cat else 0)
attr_count[cat][pred_idx+1].append(1 if (cat in finalWordClasses and word in finalWordClasses[cat]) else 0)
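# Added for clarity: per class, precision = correct attributions / all words
# attributed to that class, and recall = correct attributions / number of
# benchmark words in that class; the loop below reports the macro-average
# over the ncat classes for each predictor index.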
precision_by_classes = {}
recall_by_classes = {}
for pred_idx in range(0,len(weights)+1):
precision_by_classes[pred_idx] = []
recall_by_classes[pred_idx] = []
for cat, cat_count in ref_count.iteritems():
for idx, pred_correct in correct_predictors[cat].iteritems():
precision_by_classes[idx].append((sum(pred_correct)/sum(attr_count[cat][idx]) if sum(attr_count[cat][idx])>0 else 1))
recall_by_classes[idx].append((sum(pred_correct)/cat_count if cat_count>0 else 0))
for idx in precision_by_classes.keys():
print(str(idx)+"\t"+"P="+str(sum(precision_by_classes[idx])/ncat)+"\t"+"R="+str(sum(recall_by_classes[idx])/ncat)) | mit |
yuyuz/FLASH | HPOlib/Plotting/plotBranin.py | 5 | 5471 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
import matplotlib.cm
import matplotlib.gridspec as gridSpec
import matplotlib.pyplot
import numpy as np
import HPOlib.benchmark_functions
import HPOlib.Plotting.plot_util as plotUtil
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_contour(trial_list, name_list, save="", title=""):
# constraints:
# -5 <= x <= 10, 0 <= y <= 15
# three global optima: (-pi, 12.275), (pi, 2.275), (9.42478, 2.475), where
# branin = 0.397887
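    # For reference, the standard Branin-Hoo definition (assumed to match
    # HPOlib.benchmark_functions.branin) is
    #   f(x, y) = (y - 5.1/(4*pi**2)*x**2 + 5/pi*x - 6)**2
    #             + 10*(1 - 1/(8*pi))*cos(x) + 10,
    # whose value at the three optima listed above is ~0.397887.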
markers = itertools.cycle(['o', 's', '^', 'x'])
colors = itertools.cycle(['b', 'g', 'r', 'k'])
size = 5
# Get handles
ratio = 5
gs = gridSpec.GridSpec(ratio, 1)
fig = matplotlib.pyplot.figure(1, dpi=100)
fig.suptitle(title)
ax = matplotlib.pyplot.subplot(gs[0:ratio, :])
ax.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
xopt = [-np.pi, np.pi, 9.42478]
yopt = [12.275, 2.275, 2.475]
# Plot Branin
step = 0.1
xi = np.arange(-5, 10 + step, step)
yi = np.arange(-0, 15 + step, step)
z = np.zeros([len(xi), len(yi)])
for i in range(len(xi)):
for j in range(len(yi)):
#z[j, i] = np.power(np.e, branin.branin({"x":xi[i], "y":yi[j]}))
z[j, i] = HPOlib.benchmark_functions.branin(x=xi[i], y=yi[j])
xi, yi = np.meshgrid(xi, yi)
cax = ax.contourf(xi, yi, z, 50, cmap=matplotlib.cm.gray)
fig.colorbar(cax)
# Plot Optimums after all work is done
matplotlib.pyplot.scatter(xopt, yopt, marker="o", facecolor='w', edgecolor='w', s=20*size, label="Optimum")
# Get values
for opt in range(len(name_list)):
print name_list[opt], "has", len(trial_list[opt]['trials']), "samples"
m = markers.next()
c = colors.next()
x = np.zeros(len(trial_list[opt]["trials"]))
y = np.zeros(len(trial_list[opt]["trials"]))
for i in range(len(x)):
if '-x' in trial_list[opt]["trials"][i]["params"]:
x[i] = float(trial_list[opt]["trials"][i]["params"]["-x"].strip("'"))
y[i] = float(trial_list[opt]["trials"][i]["params"]["-y"].strip("'"))
else:
x[i] = float(trial_list[opt]["trials"][i]["params"]["x"].strip("'"))
y[i] = float(trial_list[opt]["trials"][i]["params"]["y"].strip("'"))
matplotlib.pyplot.scatter(x[0:10], y[0:10], marker=m,
s=size, facecolors=c, linewidth=0.1)
matplotlib.pyplot.scatter(x[10:-10], y[10:-10], marker=m,
linewidth=0.1, s=4*size, facecolors=c)
matplotlib.pyplot.scatter(x[-10:-1], y[-10:-1], marker=m,
linewidth=0.1, s=6*size, facecolors=c, label=name_list[opt][0])
matplotlib.pyplot.xlim([-5, 10])
matplotlib.pyplot.ylim([-0, 15])
matplotlib.pyplot.xlabel("X")
matplotlib.pyplot.ylabel("Y")
# Describe the plot
matplotlib.pyplot.title(title)
leg = matplotlib.pyplot.legend(loc="best", fancybox=True)
leg.get_frame().set_alpha(0.5)
if save != "":
matplotlib.pyplot.subplots_adjust(top=0.85)
matplotlib.pyplot.savefig(save, dpi=600, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
matplotlib.pyplot.show()
def main():
prog = "python plotBranin.py whatIsThis <onepkl> [whatIsThis] <onepkl>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
parser.add_argument("-s", "--save", dest="save", default="",
help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title", default="",
help="Optional supertitle for plot")
args, unknown = parser.parse_known_args()
if len(unknown) % 2 != 0:
print "Wrong number of arguments", len(args)
print prog
sys.exit(1)
pkl_list, name_list = plotUtil.get_pkl_and_name_list(unknown)
trial_list = list()
for i in range(len(name_list)):
result_file = pkl_list[i][0]
fh = open(result_file, "r")
trials = cPickle.load(fh)
fh.close()
trial_list.append(trials)
plot_contour(trial_list=trial_list, name_list=name_list, save=args.save, title=args.title)
if __name__ == "__main__":
main() | gpl-3.0 |
elsonidoq/py-l1tf | l1tf/example/example.py | 1 | 1328 | import csv
from matplotlib import pyplot as plt
import numpy as np
from l1tf import l1tf, remove_outliers
import pandas as pd
# Downloaded from http://www.barchartmarketdata.com/datasamples/US%20Futures%20Tick%20Query.csv
with open('US Futures Tick Query.csv') as f:
lines = list(csv.reader(f))
x = pd.Series(np.asarray([float(e[-2]) for e in lines[:1000]]))
outliers_percentaje = 0.2
outliers = np.random.random(len(x)) < outliers_percentaje
x_w_outliers = x.copy()
x_w_outliers[outliers] = (np.random.random(outliers.sum()) - 0.5) * 2 + x[outliers]
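# Background note (added; delta's exact role is assumed from the l1 trend
# filtering literature): l1tf fits a trend z by minimizing
#   0.5 * ||x - z||_2^2 + delta * ||D z||_1,
# where D is the second-order difference operator, so a larger delta yields
# a stiffer, more piecewise-linear fit -- which the two figures below show.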
plt.figure()
plt.suptitle('Different fits by changing the $\delta$ parameter')
for i, delta in enumerate([1, 10]):
plt.subplot(2,1,i+1)
filtered = l1tf(x, delta)
plt.plot(x, label='Original data')
plt.plot(filtered, linewidth=5, label='Filtered, $\delta$ = %s' % delta, alpha=0.5)
plt.legend(loc='best')
plt.figure()
plt.suptitle('Outlier detection algorithm changing the mad_factor parameter')
for i, mad_factor in enumerate([1, 3]):
plt.subplot(2,1,i+1)
x_wo_outliers = remove_outliers(x_w_outliers, delta=1, mad_factor=mad_factor)
plt.plot(x_w_outliers, label='Original data')
plt.plot(x_wo_outliers, linewidth=5, label='Without outliers, mad_factor = %s' % mad_factor, alpha=0.5)
plt.legend(loc='best')
plt.show()
| apache-2.0 |
larrybradley/astropy | astropy/visualization/wcsaxes/coordinates_map.py | 8 | 7817 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from textwrap import indent
from collections import OrderedDict
from .coordinate_helpers import CoordinateHelper
from .frame import RectangularFrame, RectangularFrame1D
from .coordinate_range import find_coordinate_range
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data.
coord_meta : dict, optional
A dictionary providing additional metadata. This should include the keys
``type``, ``wrap``, and ``unit``. Each of these should be a list with as
many items as the dimension of the coordinate system. The ``type``
entries should be one of ``longitude``, ``latitude``, or ``scalar``, the
``wrap`` entries should give, for the longitude, the angle at which the
coordinate wraps (and `None` otherwise), and the ``unit`` should give
the unit of the coordinates as :class:`~astropy.units.Unit` instances.
This can optionally also include a ``format_unit`` entry giving the
units to use for the tick labels (if not specified, this defaults to
``unit``).
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(self, axes, transform=None, coord_meta=None,
frame_class=RectangularFrame, previous_frame_path=None):
self._axes = axes
self._transform = transform
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
visible_count = 0
for index in range(len(coord_meta['type'])):
# Extract coordinate metadata
coord_type = coord_meta['type'][index]
coord_wrap = coord_meta['wrap'][index]
coord_unit = coord_meta['unit'][index]
name = coord_meta['name'][index]
visible = True
if 'visible' in coord_meta:
visible = coord_meta['visible'][index]
format_unit = None
if 'format_unit' in coord_meta:
format_unit = coord_meta['format_unit'][index]
default_label = name[0] if isinstance(name, (tuple, list)) else name
if 'default_axis_label' in coord_meta:
default_label = coord_meta['default_axis_label'][index]
coord_index = None
if visible:
visible_count += 1
coord_index = visible_count - 1
self._coords.append(CoordinateHelper(parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame,
default_label=default_label))
# Set up aliases for coordinates
if isinstance(name, tuple):
for nm in name:
nm = nm.lower()
# Do not replace an alias already in the map if we have
# more than one alias for this axis.
if nm not in self._aliases:
self._aliases[nm] = index
else:
self._aliases[name.lower()] = index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
def __contains__(self, item):
if isinstance(item, str):
return item.lower() in self._aliases
else:
return 0 <= item < len(self._coords)
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
for coord in self._coords:
yield coord
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
if isinstance(self.frame, RectangularFrame1D):
extent = [xmin, xmax]
else:
ymin, ymax = self._axes.get_ylim()
extent = [xmin, xmax, ymin, ymax]
return find_coordinate_range(self._transform,
extent,
[coord.coord_type for coord in self if coord.coord_index is not None],
[coord.coord_unit for coord in self if coord.coord_index is not None],
[coord.coord_wrap for coord in self if coord.coord_index is not None])
def _as_table(self):
# Import Table here to avoid importing the astropy.table package
# every time astropy.visualization.wcsaxes is imported.
from astropy.table import Table # noqa
rows = []
for icoord, coord in enumerate(self._coords):
aliases = [key for key, value in self._aliases.items() if value == icoord]
row = OrderedDict([('index', icoord), ('aliases', ' '.join(aliases)),
('type', coord.coord_type), ('unit', coord.coord_unit),
('wrap', coord.coord_wrap), ('format_unit', coord.get_format_unit()),
('visible', 'no' if coord.coord_index is None else 'yes')])
rows.append(row)
return Table(rows=rows)
def __repr__(self):
s = f'<CoordinatesMap with {len(self._coords)} world coordinates:\n\n'
table = indent(str(self._as_table()), ' ')
return s + table + '\n\n>'
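# A small sketch of the ``coord_meta`` dictionary described in the class
# docstring above, for a two-dimensional longitude/latitude system.  It only
# assembles the metadata; the WCSAxes instance that CoordinatesMap expects as
# ``axes`` is not constructed here, and the particular names and wrap angle are
# assumptions for illustration.
def _example_coord_meta():
    from astropy import units as u
    return {
        'type': ['longitude', 'latitude'],
        'wrap': [180., None],          # wrap angle (degrees) for the longitude only
        'unit': [u.deg, u.deg],
        'name': ['ra', 'dec'],
        'format_unit': [u.hourangle, u.deg],
    }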
| bsd-3-clause |
hparik11/Deep-Learning-Nanodegree-Foundation-Repository | reinforcement/gym/gym/utils/play.py | 4 | 6836 | import gym
import pygame
import sys
import time
import matplotlib
import matplotlib.pyplot as plt
from collections import deque
from pygame.locals import HWSURFACE, DOUBLEBUF, RESIZABLE, VIDEORESIZE
from threading import Thread
try:
matplotlib.use('GTK3Agg')
except Exception:
pass
def display_arr(screen, arr, video_size, transpose):
arr_min, arr_max = arr.min(), arr.max()
arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)
pyg_img = pygame.transform.scale(pyg_img, video_size)
screen.blit(pyg_img, (0,0))
def play(env, transpose=True, fps=30, zoom=None, callback=None, keys_to_action=None):
"""Allows one to play the game using keyboard.
To simply play the game use:
play(gym.make("Pong-v3"))
Above code works also if env is wrapped, so it's particularly useful in
verifying that the frame-level preprocessing does not render the game
unplayable.
If you wish to plot real time statistics as you play, you can use
gym.utils.play.PlayPlot. Here's a sample code for plotting the reward
    for the last 5 seconds of gameplay.
        def callback(obs_t, obs_tp1, action, rew, done, info):
            return [rew,]
        plotter = PlayPlot(callback, 30 * 5, ["reward"])
        env = gym.make("Pong-v3")
        play(env, callback=plotter.callback)
Arguments
---------
env: gym.Env
Environment to use for playing.
transpose: bool
If True the output of observation is transposed.
Defaults to true.
fps: int
Maximum number of steps of the environment to execute every second.
Defaults to 30.
zoom: float
Make screen edge this many times bigger
callback: lambda or None
Callback if a callback is provided it will be executed after
every step. It takes the following input:
obs_t: observation before performing action
obs_tp1: observation after performing action
action: action that was executed
rew: reward that was received
            done: whether the environment is done or not
info: debug info
keys_to_action: dict: tuple(int) -> int or None
Mapping from keys pressed to action performed.
For example if pressed 'w' and space at the same time is supposed
to trigger action number 2 then key_to_action dict would look like this:
{
# ...
                tuple(sorted((ord('w'), ord(' ')))): 2,
# ...
}
If None, default key_to_action mapping for that env is used, if provided.
"""
obs_s = env.observation_space
assert type(obs_s) == gym.spaces.box.Box
assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])
if keys_to_action is None:
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
assert False, env.spec.id + " does not have explicit key to action mapping, " + \
"please specify one manually"
relevant_keys = set(sum(map(list, keys_to_action.keys()),[]))
if transpose:
video_size = env.observation_space.shape[1], env.observation_space.shape[0]
else:
video_size = env.observation_space.shape[0], env.observation_space.shape[1]
if zoom is not None:
video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
pressed_keys = []
running = True
env_done = True
screen = pygame.display.set_mode(video_size)
clock = pygame.time.Clock()
while running:
if env_done:
env_done = False
obs = env.reset()
else:
action = keys_to_action[tuple(sorted(pressed_keys))]
prev_obs = obs
obs, rew, env_done, info = env.step(action)
if callback is not None:
callback(prev_obs, obs, action, rew, env_done, info)
if obs is not None:
if len(obs.shape) == 2:
obs = obs[:, :, None]
if obs.shape[2] == 1:
obs = obs.repeat(3, axis=2)
display_arr(screen, obs, transpose=transpose, video_size=video_size)
# process pygame events
for event in pygame.event.get():
# test events, set key states
if event.type == pygame.KEYDOWN:
if event.key in relevant_keys:
pressed_keys.append(event.key)
elif event.key == 27:
running = False
elif event.type == pygame.KEYUP:
if event.key in relevant_keys:
pressed_keys.remove(event.key)
elif event.type == pygame.QUIT:
running = False
elif event.type == VIDEORESIZE:
video_size = event.size
screen = pygame.display.set_mode(video_size)
print(video_size)
pygame.display.flip()
clock.tick(fps)
pygame.quit()
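# A sketch of the keys_to_action mapping described in the docstring above.
# play() looks actions up with keys_to_action[tuple(sorted(pressed_keys))], so
# the mapping must be keyed by sorted tuples of key codes; the action numbers
# below are hypothetical and have to match the real environment's action space.
def _example_keys_to_action():
    NOOP, FIRE, UP, DOWN = 0, 1, 2, 3  # hypothetical action ids
    return {
        (): NOOP,                                     # nothing pressed
        tuple(sorted((ord(' '),))): FIRE,
        tuple(sorted((ord('w'),))): UP,
        tuple(sorted((ord('s'),))): DOWN,
        tuple(sorted((ord('w'), ord(' ')))): FIRE,    # chord example
    }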
class PlayPlot(object):
def __init__(self, callback, horizon_timesteps, plot_names):
self.data_callback = callback
self.horizon_timesteps = horizon_timesteps
self.plot_names = plot_names
num_plots = len(self.plot_names)
self.fig, self.ax = plt.subplots(num_plots)
if num_plots == 1:
self.ax = [self.ax]
for axis, name in zip(self.ax, plot_names):
axis.set_title(name)
self.t = 0
self.cur_plot = [None for _ in range(num_plots)]
self.data = [deque(maxlen=horizon_timesteps) for _ in range(num_plots)]
def callback(self, obs_t, obs_tp1, action, rew, done, info):
points = self.data_callback(obs_t, obs_tp1, action, rew, done, info)
for point, data_series in zip(points, self.data):
data_series.append(point)
self.t += 1
xmin, xmax = max(0, self.t - self.horizon_timesteps), self.t
for i, plot in enumerate(self.cur_plot):
if plot is not None:
plot.remove()
self.cur_plot[i] = self.ax[i].scatter(range(xmin, xmax), list(self.data[i]))
self.ax[i].set_xlim(xmin, xmax)
plt.pause(0.000001)
if __name__ == '__main__':
from rl_algs.common.atari_wrappers import wrap_deepmind
def callback(obs_t, obs_tp1, action, rew, done, info):
return [rew, obs_t.mean()]
env_plotter = EnvPlotter(callback, 30 * 5, ["reward", "mean intensity"])
env = gym.make("MontezumaRevengeNoFrameskip-v3")
env = wrap_deepmind(env)
play_env(env, zoom=4, callback=env_plotter.callback, fps=30)
| mit |
madjelan/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
grhawk/ASE | ase/utils/sphinx.py | 2 | 5271 | import os
import types
import warnings
from os.path import join
from stat import ST_MTIME
from docutils import nodes
from docutils.parsers.rst.roles import set_classes
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def mol_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
n = []
t = ''
while text:
if text[0] == '_':
n.append(nodes.Text(t))
t = ''
n.append(nodes.subscript(text=text[1]))
text = text[2:]
else:
t += text[0]
text = text[1:]
n.append(nodes.Text(t))
return n, []
def svn_role_tmpl(urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
else:
name = text
if name[0] == '~':
name = name.split('/')[-1]
text = text[1:]
if '?' in name:
name = name[:name.index('?')]
ref = urlroot + text
set_classes(options)
node = nodes.reference(rawtext, name, refuri=ref,
**options)
return [node], []
def trac_role_tmpl(urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
else:
name = text
if name[0] == '~':
name = name.split('/')[-1]
text = text[1:]
if '?' in name:
name = name[:name.index('?')]
ref = urlroot + text
set_classes(options)
node = nodes.reference(rawtext, name, refuri=ref,
**options)
return [node], []
def epydoc_role_tmpl(package_name, urlroot,
role,
rawtext, text, lineno, inliner, options={}, content=[]):
name = None
if text[-1] == '>':
i = text.index('<')
name = text[:i - 1]
text = text[i + 1:-1]
components = text.split('.')
if components[0] != package_name:
components.insert(0, package_name)
if name is None:
name = components[-1]
try:
module = None
for n in range(2, len(components) + 1):
module = __import__('.'.join(components[:n]))
except ImportError:
if module is None:
            print('epydoc: could not process: %s' % str(components))
raise
for component in components[1:n]:
module = getattr(module, component)
ref = '.'.join(components[:n])
if isinstance(module, (type, types.ClassType)):
ref += '-class.html'
else:
ref += '-module.html'
if n < len(components):
ref += '#' + components[-1]
else:
ref = '.'.join(components) + '-module.html'
ref = urlroot + ref
set_classes(options)
node = nodes.reference(rawtext, name,
refuri=ref,
**options)
return [node], []
def create_png_files():
errcode = os.system('povray -h 2> /dev/null')
if errcode:
warnings.warn('No POVRAY!')
# Replace write_pov with write_png:
from ase.io import pov
from ase.io.png import write_png
def write_pov(filename, atoms, run_povray=False, **parameters):
p = {}
for key in ['rotation', 'show_unit_cell', 'radii',
'bbox', 'colors', 'scale']:
if key in parameters:
p[key] = parameters[key]
write_png(filename[:-3] + 'png', atoms, **p)
pov.write_pov = write_pov
olddir = os.getcwd()
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.py'):
path = join(dirpath, filename)
lines = open(path).readlines()
try:
line = lines[0]
except IndexError:
continue
if 'coding: utf-8' in line:
line = lines[1]
if line.startswith('# creates:'):
t0 = os.stat(path)[ST_MTIME]
run = False
for file in line.split()[2:]:
try:
t = os.stat(join(dirpath, file))[ST_MTIME]
except OSError:
run = True
break
else:
if t < t0:
run = True
break
if run:
print('running:', join(dirpath, filename))
os.chdir(dirpath)
plt.figure()
try:
execfile(filename, {})
finally:
os.chdir(olddir)
for file in line.split()[2:]:
print(dirpath, file)
if '.svn' in dirnames:
dirnames.remove('.svn')
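# A sketch of the "# creates:" convention that create_png_files() relies on: a
# documentation script declares its output images in its first line (or the
# second line when a coding line comes first), e.g. "# creates: dos.png band.png".
# The helper below reproduces just that header-parsing step in isolation; the
# file names are assumptions for illustration.
def _parse_creates_header(lines):
    """Return the output files declared by a '# creates:' header, if any."""
    if not lines:
        return []
    line = lines[0]
    if 'coding: utf-8' in line and len(lines) > 1:
        line = lines[1]
    if line.startswith('# creates:'):
        return line.split()[2:]
    return []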
| gpl-2.0 |
xubenben/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
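# A usage sketch (not an additional test) of the leaf linked list that
# check_threshold() above traverses: starting from dummy_leaf_.next_leaf_, every
# leaf exposes its subclusters, whose centroids make up subcluster_centers_.
# It assumes a fitted Birch instance.
def _collect_leaf_centroids(birch_instance):
    centroids = []
    current_leaf = birch_instance.dummy_leaf_.next_leaf_
    while current_leaf:
        for subcluster in current_leaf.subclusters_:
            centroids.append(subcluster.centroid_)
        current_leaf = current_leaf.next_leaf_
    return centroids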
| bsd-3-clause |
kastnerkyle/pylearn2 | pylearn2/sandbox/cuda_convnet/specialized_bench.py | 5 | 3863 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
filters2 = shared(base_filters_value, name='filters')
layer_2_detector = FilterActs()(images, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
| bsd-3-clause |
kazemakase/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
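# A small usage sketch complementing the docstring example above: fit
# BayesianRidge on noisy synthetic data and read back the estimated noise
# precision (alpha_), weight precision (lambda_) and, with compute_score=True,
# the per-iteration objective values (scores_).  The toy data is made up for
# illustration.
def _bayesian_ridge_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = np.dot(X, np.array([1., 0., -2.])) + 0.1 * rng.randn(50)
    reg = BayesianRidge(compute_score=True)
    reg.fit(X, y)
    return reg.coef_, reg.alpha_, reg.lambda_, reg.scores_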
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
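# A usage sketch of the pruning behaviour implemented above: weights whose
# precision lambda_ grows past threshold_lambda are removed and their
# coefficients set exactly to zero.  The synthetic data, where only the first of
# five features is informative, is made up for illustration.
def _ard_pruning_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = 3. * X[:, 0] + 0.05 * rng.randn(100)
    reg = ARDRegression()
    reg.fit(X, y)
    n_pruned = int(np.sum(reg.coef_ == 0.))   # typically the uninformative features
    return reg.coef_, n_pruned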
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
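# A sketch of the discovery call that drives the tests in this module:
# all_estimators() yields (name, class) pairs and can be narrowed with
# type_filter, as the class-weight and n_iter tests below do.
def _list_estimator_names(type_filter=None):
    return sorted(name for name, _ in all_estimators(type_filter=type_filter))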
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all estimators of non-transformer type which have a
    # max_iter attribute report an n_iter value of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
nanophotonics/nplab | nplab/experiment/hyperspectral_imaging/analysis/hyperspectral_plots.py | 1 | 7099 | from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
__author__ = 'alansanders'
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
# required for formatting image axes
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.ticker import AutoMinorLocator, MaxNLocator
from matplotlib.ticker import NullLocator, NullFormatter
from matplotlib import cm, gridspec
from nputils.plotting.plot_functions import scale_axes
from nputils.plotting import np_cmap
locs = {'upper right' : (0.95,0.95),
'upper left' : (0.05,0.95),
'lower left' : (0.05,0.05),
'lower right' : (0.95,0.15)}
def _plot_image(x, y, z, ax, **kwargs):
img_kwargs = {'vmin': z.min(), 'vmax': z.max(), 'cmap': cm.afmhot,
'rasterized': True, 'shading': 'gouraud'}
for k in kwargs:
img_kwargs[k] = kwargs[k]
x, y = np.meshgrid(x, y, indexing='ij')
img = ax.pcolormesh(x, y, z, **img_kwargs)
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
return img
def _format_image_plot(ax, xlabel=None, ylabel=None, invert=False):
ax.minorticks_on()
ax.set_aspect('equal')
ax.tick_params(axis='both', which='major', labelsize='small')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_locator(MaxNLocator(5))
axis.set_minor_locator(AutoMinorLocator(4))
axis.set_major_formatter(FormatStrFormatter('%d'))
if invert:
[i.set_color('white') for i in ax.xaxis.get_ticklines()]
[i.set_color('white') for i in ax.yaxis.get_ticklines()]
[ax.spines[s].set_color('white') for s in ax.spines]
if xlabel is not None:
ax.set_xlabel(xlabel)
else:
plt.setp(ax.get_xticklabels(), visible=False)
if ylabel is not None:
ax.set_ylabel(ylabel)
else:
plt.setp(ax.get_yticklabels(), visible=False)
def plot_wavelength(hs_image, ax, wl, polarisation=1, wlnorm=True, rescale_axes=False,
smoothing=None, contour_lines=None, mult=1.,
loc='upper right', xlabels=True, ylabels=True, threshold=None,
img_kwargs={}, contour_kwargs={'colors':'k'}, **kwargs):
"""Plots the hyperspectral image at the selected wavelength on a given axis."""
image = hs_image.get_image(wl, polarisation, **kwargs)
if wlnorm:
minimum, maximum = (image.min(), image.max())
else:
wavelength, spectra = hs_image.get_spectra(polarisation)
minimum, maximum = (spectra.min(), spectra.max())
if threshold is not None:
maximum = threshold*maximum + (1-threshold)*minimum
x, unit = scale_axes(hs_image.x)
y, unit = scale_axes(hs_image.y)
if rescale_axes:
x -= x.mean()
y -= y.mean()
image *= mult
if smoothing is not None:
image = gaussian_filter(image, smoothing)
img = _plot_image(x, y, image, ax, **img_kwargs)
if contour_lines is not None:
ax.contour(x, y, image, contour_lines, **contour_kwargs)
if xlabels: xlabel = '$x$ (%s)'%unit
else: xlabel = None
if ylabels: ylabel = '$y$ (%s)'%unit
else: ylabel = None
_format_image_plot(ax, xlabel=xlabel, ylabel=ylabel)
tx, ty = locs[loc]
ax.text(tx, ty, str(wl)+' nm', va='top', ha='right',
transform=ax.transAxes, color='white', fontsize='small', fontweight='bold')
return img
def plot_colour_map(hs_image, ax, polarisation=1, norm=True,
smoothing=None,
loc='upper right', xlabels=True, ylabels=True,
img_kwargs={}, **kwargs):
"""Plots the hyperspectral image at the selected wavelength on a given axis."""
image = hs_image.construct_colour_map(polarisation, norm, **kwargs)
x, unit = scale_axes(hs_image.x)
y, unit = scale_axes(hs_image.y)
if smoothing is not None:
image = gaussian_filter(image, smoothing)
img = _plot_image(x, y, image, ax, **img_kwargs)
if xlabels: xlabel = '$x$ (%s)'%unit
else: xlabel = None
if ylabels: ylabel = '$y$ (%s)'%unit
else: ylabel = None
_format_image_plot(ax, xlabel=xlabel, ylabel=ylabel)
#tx, ty = locs[loc]
#ax.text(tx, ty, 'false colour', va='top', ha='right',
# transform=ax.transAxes, color='white', fontsize='small')
return img
def plot_colour(hs_image, ax, amp=1, smoothing=None, loc='upper right',
xlabels=True, ylabels=True,
**kwargs):
ax.set_axis_bgcolor('black')
image = hs_image.reconstruct_colour_image(**kwargs)
if smoothing is not None:
image = gaussian_filter(image, smoothing)
image[:,:,3] = amp*image[:,:,3]
image[:,:,3] = np.where(image[:,:,3] > 1, 1, image[:,:,3])
x, unit = scale_axes(hs_image.x)
y, unit = scale_axes(hs_image.y)
limits = np.array([x.min(), x.max(),
y.min(), y.max()])
ax.imshow(image, origin='lower', extent=limits)
ax.set_xlim(limits[0], limits[1])
ax.set_ylim(limits[2], limits[3])
if xlabels: xlabel = '$x$ (%s)'%unit
else: xlabel = None
if ylabels: ylabel = '$y$ (%s)'%unit
else: ylabel = None
_format_image_plot(ax, xlabel=xlabel, ylabel=ylabel)
tx, ty = locs[loc]
ax.text(tx, ty, 'colour', va='top', ha='right',
transform=ax.transAxes, color='white', fontsize='small')
return image
def plot_line_scan(hs_image, ax, axis, line, imnorm=False, linenorm=False, dat='', smooth=None):
if dat=='':
        data = hs_image.spectra
wavelength = hs_image.wavelength
elif dat=='trans':
data = hs_image.data_t
wavelength = hs_image.wavelength_t
line_spectra = hs_image.get_line_spectra(axis, line, dat)
    if smooth is not None: line_spectra = gaussian_filter(line_spectra, smooth)
    if linenorm:
        for i in range(line_spectra.shape[0]):
            line_spectra[i] = old_div(line_spectra[i], line_spectra[i].max())
    if imnorm:
minimum = line_spectra.min()
maximum = line_spectra.max()
else:
minimum = data.min() * (data.min()>=0.0)
maximum = data.max() * (data.max()>=0.0)
#lev_exp = np.linspace(np.log10(minimum), np.log10(maximum), 200)
#levs = np.power(10, lev_exp)
#norm = LogNorm(minimum, maximum)
levs = np.linspace(minimum, maximum, 200)
norm = cm.colors.Normalize(vmax=maximum, vmin=minimum)
X, Y = np.meshgrid(hs_image.x, wavelength, indexing='ij')
ax.contourf(X, Y, line_spectra, levs, norm=norm, cmap=cm.CMRmap)
if axis=='x': label = 'y={0:.2f} nm'.format(hs_image.x[line])
if axis=='y': label = 'x={0:.2f} nm'.format(hs_image.y[line])
ax.text(0.95, 0.95, label, va='top', ha='right',
transform=ax.transAxes, color='white', fontsize='small')
ax.tick_params(axis='both', which='major', labelsize='small')
ax.set_xlim(hs_image.x.min(), hs_image.x.max())
ax.set_ylim(wavelength.min(), wavelength.max())
_format_image_plot(ax)
return line_spectra | gpl-3.0 |
caganze/wisps | wisps/simulations/selection_function.py | 1 | 2456 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
############################
#Selection function routines
#NEED MORE COMMENTS
########################
#imports
import splat
import wisps
import matplotlib.pyplot as plt
from wisps import datasets,make_spt_number
from wisps.simulations import selection_function
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import splat.empirical as spem
import copy
from wisps.data_analysis import selection_criteria as sel_crt
import pickle
import numba
import copy
from concurrent.futures import ThreadPoolExecutor, wait , ALL_COMPLETED
from functools import partial
def add_noise_to_spectrum(sp, snr):
#if I propose a larger SNR don't do anything to save time
sp.reset()
sp_old=sp.spectral_type
sp.add_noise(snr, nsample=1, recompute_indices= True)
f_test={"f_test": sp.f_test, 'line_chi': sp.line_chi, 'spex_chi': sp.spex_chi, 'spt_new': sp.spectral_type, 'sp_old': sp_old, 'dof': sp.dof}
res_dict= {**sp.snr, **sp.indices, **f_test}
sp.reset()
return res_dict
def add_multiple_noises(sp, noises):
res=list(map(lambda x: add_noise_to_spectrum(sp, x), noises))
return res
def make_data(spectra, **kwargs):
"""
create a selection function from a list of spectra and spts
"""
results=[]
nsample=kwargs.get("nsample", 1000)
#for sx, spt in zip(spectra, spts):
# results.append(self.generate_spectra(sx, spt, **kwargs))
#run this in parallel
snrs=10**np.random.uniform(-1,3,(len(spectra), nsample))
iterables=([spectra, snrs])
method=partial(add_multiple_noises)
with ThreadPoolExecutor(max_workers=100) as executor:
futures=list(executor.map( method, *iterables, timeout=None, chunksize=10))
results=[x for x in futures]
return pd.DataFrame.from_records(results)
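# A stripped-down sketch of the parallel pattern used in make_data() above:
# functools.partial plus ThreadPoolExecutor.map fanning a two-argument function
# out over paired iterables.  The cheap stand-in task replaces
# add_multiple_noises purely for illustration.
def _parallel_map_sketch(n_items=8, n_noise=4):
    def degrade(value, noises):
        # stand-in for add_multiple_noises: one result per proposed noise level
        return [value + noise for noise in noises]
    values = list(range(n_items))
    noise_draws = np.random.uniform(0, 1, (n_items, n_noise))
    method = partial(degrade)
    with ThreadPoolExecutor(max_workers=4) as executor:
        results = list(executor.map(method, values, noise_draws))
    return results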
def create_selection_function(**kwargs):
"""
Create a selection with data
"""
#optional inputs
output_file=kwargs.get('output_file', wisps.OUTPUT_FILES+'/selection_function.pkl')
spectra=pd.read_pickle(wisps.OUTPUT_FILES+'/l_t_dwarfs_spex.pkl')
splat.initializeStandards()
#set up the selection
def convert_to_string(x):
if isinstance(x, str):
return x
else:
return splat.typeToNum(x)
res=make_data(spectra, **kwargs)
with open(output_file, 'wb') as file:
pickle.dump(res,file)
return | mit |
rudhir-upretee/SUMO_Src | tools/projects/TaxiFCD_Krieg/src/fcdToRoutes/My_mpl_dump_onNet.py | 3 | 18287 | #!/usr/bin/env python
"""
@file My_mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Sascha Krieg
@author Michael Behrisch
@date 2007-10-25
@version $Id: My_mpl_dump_onNet.py 11671 2012-01-07 20:14:30Z behrisch $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
-n E:/DLR/Projekte/Diplom/Daten/sumoNetzFilesNurnbergIV/nuernberg_vls_new.net.xml
-d E:/DLR/Projekte/Diplom/Daten/mpl_dump_onNet__Files/FCD_vs_completeRoute351_11.out.xml
--values no,no --show --color-map 0:#888888,.4:#ff0000,1:#00ff00
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0])*16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap)-1):
if colormap[i+1][0]>val:
scale = (val - colormap[i][0]) / (colormap[i+1][0] - colormap[i][0])
r = colormap[i][1][0] + (colormap[i+1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + (colormap[i+1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + (colormap[i+1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
ret = []
defs = mapDef.split(",")
for d in defs:
(value, color) = d.split(":")
r = color[1:3]
g = color[3:5]
b = color[5:7]
ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
return ret
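# For example, parseColorMap("0:#ff0000,1:#00ff00") yields
# [(0.0, (255.0, 0.0, 0.0)), (1.0, (0.0, 255.0, 0.0))], and
# toColor(0.5, ...) then interpolates linearly to "#7f7f00".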
class NetReader(handler.ContentHandler):
"""Reads a network, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['maxspeed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append("")
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def characters(self, content):
if self._parseLane:
self._currentShapes[-1] = self._currentShapes[-1] + content
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
# print values2[edge]
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
edge2plotWidth[edge] = 1.0
else:
edge2plotWidth[edge] = 0.2
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
# set figure size
if not options.show:
rcParams['backend'] = 'Agg'
if options.size:
            f = figure(figsize=[float(v) for v in options.size.split(",")])
else:
f = figure()
plot([-1000,-2000], [-1000,-2000], color=toColor(.9, colorMap), label="gegeben")
plot([-1000,-2000], [-1000,-2000], color=toColor(.5, colorMap), label="hinzugefuegt")
plot([-1000,-2000], [-1000,-2000], color=toColor(0, colorMap), label="nicht befahren")
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
legend()
# set axes
        if options.xticks!="":
            (xb, xe, xd, xs) = options.xticks.split(",")
            xticks(arange(float(xb), float(xe), float(xd)), size = xs)
        if options.yticks!="":
            (yb, ye, yd, ys) = options.yticks.split(",")
            yticks(arange(float(yb), float(ye), float(yd)), size = ys)
        if options.xlim!="":
            (xb, xe) = options.xlim.split(",")
            xlim(float(xb), float(xe))
        else:
            xlim(xmin, xmax)
        if options.ylim!="":
            (yb, ye) = options.ylim.split(",")
            ylim(float(yb), float(ye))
        else:
            ylim(ymin, ymax)
#legend
#legend(('green','red'))
if options.show:
show()
if saveName:
savefig(saveName);
def plot(self, weights, options, colorMap):
self._minValue1 = weights._minValue1
self._minValue2 = weights._minValue2
self._maxValue1 = weights._maxValue1
self._maxValue2 = weights._maxValue2
if options.join:
self.plotData(weights, options, weights._edge2value1, weights._edge2value2, options.output, colorMap)
else:
for i in weights._intervalBegins:
if options.verbose:
print " Processing step %d..." % i
output = options.output
if output:
output = output % i
self.plotData(weights, options, weights._unaggEdge2value1[i], weights._unaggEdge2value2[i], output, colorMap )
def knowsEdge(self, id):
return id in self._edge2from
class WeightsReader(handler.ContentHandler):
"""Reads the dump file"""
def __init__(self, net, value1, value2):
self._id = ''
self._edge2value2 = {}
self._edge2value1 = {}
self._edge2no1 = {}
self._edge2no2 = {}
self._net = net
self._intervalBegins = []
self._unaggEdge2value2 = {}
self._unaggEdge2value1 = {}
self._beginTime = -1
self._value1 = value1
self._value2 = value2
def startElement(self, name, attrs):
if name == 'interval':
self._beginTime = int(attrs['begin'])
self._intervalBegins.append(self._beginTime)
self._unaggEdge2value2[self._beginTime] = {}
self._unaggEdge2value1[self._beginTime] = {}
if name == 'edge':
if self._net.knowsEdge(attrs['id']):
self._id = attrs['id']
if self._id not in self._edge2value2:
self._edge2value2[self._id] = 0
self._edge2value1[self._id] = 0
self._edge2no1[self._id] = 0
self._edge2no2[self._id] = 0
value1 = self._value1
if attrs.has_key(value1):
value1 = float(attrs[value1])
self._edge2no1[self._id] = self._edge2no1[self._id] + 1
else:
value1 = float(value1)
self._edge2value1[self._id] = self._edge2value1[self._id] + value1
self._unaggEdge2value1[self._beginTime][self._id] = value1
value2 = self._value2
if attrs.has_key(value2):
value2 = float(attrs[value2])
self._edge2no2[self._id] = self._edge2no2[self._id] + 1
else:
value2 = float(value2)
self._edge2value2[self._id] = self._edge2value2[self._id] + value2
self._unaggEdge2value2[self._beginTime][self._id] = value2
def updateExtrema(self, values1ByEdge, values2ByEdge):
for edge in values1ByEdge:
if self._minValue1==-1 or self._minValue1>values1ByEdge[edge]:
self._minValue1 = values1ByEdge[edge]
if self._maxValue1==-1 or self._maxValue1<values1ByEdge[edge]:
self._maxValue1 = values1ByEdge[edge]
if self._minValue2==-1 or self._minValue2>values2ByEdge[edge]:
self._minValue2 = values2ByEdge[edge]
if self._maxValue2==-1 or self._maxValue2<values2ByEdge[edge]:
self._maxValue2 = values2ByEdge[edge]
def valueDependantNorm(self, values, minV, maxV, tendency, percSpeed):
if tendency:
for edge in self._edge2value2:
if values[edge]<0:
values[edge] = 0
else:
values[edge] = 1
# elif percSpeed:
# for edge in self._edge2value2:
# values[edge] = (values[edge] / self._net._edge2speed[edge])
# elif minV!=maxV:
# for edge in self._edge2value2:
# values[edge] = (values[edge] - minV) / (maxV - minV)
def norm(self, tendency, percSpeed):
self._minValue1 = -1
self._maxValue1 = -1
self._minValue2 = -1
self._maxValue2 = -1
# compute mean value if join is set
if options.join:
for edge in self._edge2value2:
if float(self._edge2no1[edge])!=0:
self._edge2value1[edge] = float(self._edge2value1[edge]) / float(self._edge2no1[edge])
else:
self._edge2value1[edge] = float(self._edge2value1[edge])
if float(self._edge2no2[edge])!=0:
self._edge2value2[edge] = float(self._edge2value2[edge]) / float(self._edge2no2[edge])
else:
print "ha"
self._edge2value2[edge] = float(self._edge2value2[edge])
# compute min/max
if options.join:
self.updateExtrema(self._edge2value1, self._edge2value2)
else:
for i in weights._intervalBegins:
self.updateExtrema(self._unaggEdge2value1[i], self._unaggEdge2value2[i])
# norm
if options.verbose:
print "w range: " + str(self._minValue1) + " - " + str(self._maxValue1)
print "c range: " + str(self._minValue2) + " - " + str(self._maxValue2)
if options.join:
self.valueDependantNorm(self._edge2value1, self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._edge2value2, self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
else:
for i in weights._intervalBegins:
self.valueDependantNorm(self._unaggEdge2value1[i], self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._unaggEdge2value2[i], self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-d", "--dump", dest="dump",
help="dump file to use", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="(base) name for the output", metavar="FILE")
# data handling
optParser.add_option("-j", "--join", action="store_true", dest="join",
default=False, help="sums up values from all read intervals")
optParser.add_option("-w", "--min-width", dest="min_width",
type="float", help="sets minimum line width")
optParser.add_option("-W", "--max-width", dest="max_width",
type="float", help="sets maximum line width")
optParser.add_option("-c", "--min-color", dest="min_color",
type="float", help="sets minimum color (between 0 and 1)")
optParser.add_option("-C", "--max-color", dest="max_color",
type="float", help="sets maximum color (between 0 and 1)")
optParser.add_option("--tendency-coloring", action="store_true", dest="tendency_coloring",
default=False, help="show only 0/1 color for egative/positive values")
optParser.add_option("--percentage-speed", action="store_true", dest="percentage_speed",
default=False, help="speed is normed to maximum allowed speed on an edge")
optParser.add_option("--values", dest="values",
type="string", default="entered,speed", help="which values shall be parsed")
optParser.add_option("--color-map", dest="colormap",
type="string", default="0:#ff0000,.5:#ffff00,1:#00ff00", help="Defines the color map")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# output
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows each plot after generating it")
# parse options
(options, args) = optParser.parse_args()
# init color map
colorMap = parseColorMap(options.colormap)
# read network
if options.verbose:
print "Reading net..."
parser = make_parser()
net = NetReader()
parser.setContentHandler(net)
parser.parse(options.net)
# read weights
if options.verbose:
print "Reading weights..."
mValues = options.values.split(",")
weights = WeightsReader(net, mValues[0], mValues[1])
parser.setContentHandler(weights)
parser.parse(options.dump)
# process
if options.verbose:
print "Norming weights..."
weights.norm(options.tendency_coloring, options.percentage_speed)
if options.verbose:
print "Plotting..."
net.plot(weights, options, colorMap)
| gpl-3.0 |
ppries/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 13 | 3098 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
  # Convert the target to a one-hot tensor of shape (length of features, 3),
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
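  # For example, a target vector [0, 2] becomes [[1, 0, 0], [0, 0, 1]].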
  # Create three fully connected layers respectively of size 10, 20, and 10,
  # with each layer having a dropout probability of 0.5 (keep_prob=0.5).
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(features, layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
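# (With analyzer='char' and ngram_range=(1, 3), the string "the" is
# represented by the character n-grams 't', 'h', 'e', 'th', 'he', 'the'.)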
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
pagutierrez/tutorial-sklearn | notebooks-spanish/figures/plot_2d_separator.py | 41 | 1513 | import numpy as np
import matplotlib.pyplot as plt
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
if eps is None:
eps = X.std() / 2.
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
try:
decision_values = classifier.decision_function(X_grid)
levels = [0]
fill_levels = [decision_values.min(), 0, decision_values.max()]
except AttributeError:
# no decision_function
decision_values = classifier.predict_proba(X_grid)[:, 1]
levels = [.5]
fill_levels = [0, .5, 1]
if ax is None:
ax = plt.gca()
if fill:
ax.contourf(X1, X2, decision_values.reshape(X1.shape),
levels=fill_levels, colors=['blue', 'red'])
else:
ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
colors="black")
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if __name__ == '__main__':
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(centers=2, random_state=42)
clf = LogisticRegression().fit(X, y)
plot_2d_separator(clf, X, fill=True)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
| cc0-1.0 |
gewaltig/cython-neuron | testsuite/manualtests/stdp_dopa_check.py | 2 | 10096 | #! /usr/bin/env python
#
# stdp_dopa_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
import numpy as n
# Test script to reproduce changes in weight of a dopamine modulated STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from spikes-3-0.gdf
# (output of test_stdp_dopa.py).
# output: pre/post/dopa \t spike time \t weight
#
# Synaptic dynamics for dopamine modulated STDP synapses as used in [1], based on [2]
#
# References:
# [1] Potjans W, Morrison A and Diesmann M (2010). Enabling functional neural circuit simulations with distributed computing of neuromodulated plasticity. Front. Comput. Neurosci. 4:141. doi:10.3389/fncom.2010.00141
# [2] Izhikevich, E. M. (2007). Solving the distal reward problem through linkage of STDP and dopamine signaling. Cereb. Cortex 17(10), 2443-2452.
#
# author: Wiebke Potjans, October 2010
def stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d):
w = w_init # initial weight
w_min = 0. # minimal weight
w_max = 200. #maximal weight
i=0 # index of presynaptic spike
j=0 # index of postsynaptic spike
k=0 # index of dopamine spike
last_post_spike = dendritic_delay
Etrace = 0.
Dtrace = 0.
last_e_update = 0.
last_w_update = 0.
last_pre_spike = 0.
last_dopa_spike = 0.
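    # Etrace is the STDP eligibility trace: it is incremented by the usual
    # STDP window (+A_plus*exp(-dt/tau_plus) at post spikes,
    # -A_minus*exp(-dt/tau_minus) at pre spikes) and decays with tau_e.
    # Dtrace models the dopamine concentration: each dopamine spike adds
    # 1/tau_d and the trace decays with tau_d. Between events the weight
    # change is the analytic integral of Etrace(t)*Dtrace(t).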
advance = True
while advance:
advance = False
# next spike is presynaptic
if ((pre_spikes[i] < post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
dt = pre_spikes[i] - last_post_spike
# weight update
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
# next spike is postsynaptic
if( (post_spikes[j] < pre_spikes[i]) and (post_spikes[j] < dopa_spikes[k])):
dt = post_spikes[j] - last_pre_spike
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is dopamine spike
if ((dopa_spikes[k] < pre_spikes[i]) and (dopa_spikes[k] < post_spikes[j])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# pre and postsynaptic spikes are at the same time
# Etrace is not updated for this case; therefore no weight update is required
if ((pre_spikes[i]==post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) -1:
j +=1
advance = True
# presynaptic spike and dopamine spike are at the same time
if ((pre_spikes[i]==dopa_spikes[k]) and (pre_spikes[i] < post_spikes[j])):
dt = pre_spikes[i] - last_post_spike
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# postsynaptic spike and dopamine spike are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j] < pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# all three spikes are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j]==pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
return w
# stdp dopa parameters
w_init = 35.
tau_plus = 20.
tau_minus = 15.
tau_e = 1000.
tau_d = 200.
A_minus = 1.5
A_plus = 1.0
dendritic_delay = 1.0
delay_d = 1.
# load spikes from simulation with test_stdp_dopa.py
spikes = n.loadtxt("spikes-3-0.gdf")
pre_spikes = spikes[find(spikes[:,0]==4),1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + dendritic_delay at the synapse
post_spikes =spikes[find(spikes[:,0]==5),1] + dendritic_delay
# dopa spike arrives at sp_j + delay_d at the synapse
dopa_spikes = spikes[find(spikes[:,0]==6),1] + delay_d
# calculate development of stdp weight
w = stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d)
print w
| gpl-2.0 |
sumspr/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
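# PolynomialFeatures builds the Vandermonde-like matrix described above; for
# instance, PolynomialFeatures(3).fit_transform(np.array([[2.]])) gives
# [[1., 2., 4., 8.]], i.e. the columns 1, x, x**2, x**3 for the point x=2.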
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/stats/morestats.py | 7 | 78330 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy.lib._numpy_compat import count_nonzero
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability `alpha`.
Notes
-----
    Each of the mean, variance, and standard deviation estimates is a tuple
    (center, (lower, upper)), where center is the mean of the conditional pdf
    of the value given the data and (lower, upper) is a confidence interval
    centered on the median, containing the estimate with probability
    `alpha`.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
    Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
    The return values from bayes_mvs(data) are equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy.stats import mvsdist
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if (n < 2):
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C)
else:
nm1 = n-1
fac = n*C/2.
val = nm1/2.
mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1))
sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac))
vdist = distributions.invgamma(val,scale=fac)
return mdist, vdist, sdist
def kstat(data,n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_n kappa_n (it)^n / n!    (sum from n=0 to inf)
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n+1,'d')
data = ravel(data)
N = len(data)
for k in range(1,n+1):
S[k] = sum(data**k,axis=0)
if n == 1:
return S[1]*1.0/N
elif n == 2:
return (N*S[2]-S[1]**2.0)/(N*(N-1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0))
elif n == 4:
return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \
(N*(N-1.0)*(N-2.0)*(N-3.0))
else:
raise ValueError("Should not be here.")
def kstatvar(data,n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data,n=2)*1.0/N
elif n == 2:
k2 = kstat(data,n=2)
k4 = kstat(data,n=4)
return (2*k2*k2*N + (N-1)*k4)/(N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
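# For example, with N=5 the Filliben order statistic medians are roughly
# [0.129, 0.314, 0.500, 0.686, 0.871].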
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample/2.,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
    correlation coefficient of the given data against a one-parameter
family of distributions.
See also ppcc_plot
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1-r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals*0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
        plot.xlabel('Shape Values')
        plot.ylabel('Prob Plot Corr. Coef.')
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
plot.set_xlabel('$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
plot.xlabel('$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N,'f')
init = 0
else:
if len(a) != N//2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0,2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x,dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
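    Examples
    --------
    A minimal illustrative sketch: test simulated exponential data against
    the default normal distribution (values depend on the random draw and
    are therefore not reproduced here):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.expon.rvs(size=50)
    >>> A2, critical, sig = stats.anderson(x, dist='norm')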
"""
if dist not in ['norm','expon','gumbel','extreme1','logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y-xbar)/s
z = distributions.norm.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_expon / (1.0 + 0.6/N),3)
elif dist == 'logistic':
def rootfunc(ab,xj,N):
a,b = ab
tmp = (xj-a)/b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2),axis=0)-0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N]
return array(val)
sol0 = array([xbar,np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5)
w = (y-sol[0])/sol[1]
z = distributions.logistic.cdf(w)
sig = array([25,10,5,2.5,1,0.5])
critical = around(_Avals_logistic / (1.0+0.25/N),3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
#def fixedsolve(th,xj,N):
# val = stats.sum(xj)*1.0/N
# tmp = exp(-xj/th)
# term = sum(xj*tmp,axis=0)
# term /= sum(tmp,axis=0)
# return val - term
#s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
#xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0)
A2 = -N-S
return A2, critical, sig
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(np.float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
def ansari(x,y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
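    Examples
    --------
    A minimal illustrative sketch comparing the spread of two simulated
    samples (the p-value depends on the random draws):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = np.random.normal(size=40)
    >>> y = np.random.normal(scale=3.0, size=40)
    >>> AB, p = stats.ansari(x, y)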
"""
x,y = asarray(x),asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m+n
xy = r_[x,y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank,N-rank+1)),0)
AB = sum(symrank[:n],axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
ind = AB-astart
total = sum(a1,axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if (ind == cind):
pval = 2.0*sum(a1[:cind+1],axis=0)/total
else:
pval = 2.0*sum(a1[:cind],axis=0)/total
else:
find = int(floor(ind))
if (ind == floor(ind)):
pval = 2.0*sum(a1[find:],axis=0)/total
else:
pval = 2.0*sum(a1[find+1:],axis=0)/total
return AB, min(1.0,pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n*(N+1.0)**2 / 4.0 / N
varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2)
else:
mnAB = n*(N+2.0)/4.0
varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2,axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
z = (AB - mnAB)/sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
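    Examples
    --------
    A minimal illustrative sketch with three simulated samples of unequal
    variance (output depends on the random draws):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = np.random.normal(scale=1.0, size=30)
    >>> b = np.random.normal(scale=2.0, size=35)
    >>> c = np.random.normal(scale=2.0, size=40)
    >>> T, p = stats.bartlett(a, b, c)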
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k,'d')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni,axis=0)
spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k))
numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
T = numer / denom
pval = distributions.chi2.sf(T,k-1) # 1 - cdf
return T, pval
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
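    Examples
    --------
    A minimal illustrative sketch, applying the median-centered variant to
    three simulated samples with different spreads (output depends on the
    random draws):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = np.random.normal(scale=1.0, size=30)
    >>> b = np.random.normal(scale=2.0, size=35)
    >>> c = np.random.normal(scale=2.0, size=40)
    >>> W, p = stats.levene(a, b, c, center='median')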
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None]*k
for i in range(k):
Zij[i] = abs(asarray(args[i])-Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i]*Ni[i]
Zbar /= Ntot
numer = (Ntot-k) * sum(Ni*(Zbari-Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i]-Zbari[i])**2, axis=0)
denom = (k-1.0)*dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return W, pval
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
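    Examples
    --------
    A minimal illustrative sketch: test whether observing 7 successes in
    20 trials is consistent with a hypothesized success probability of 0.3:
    >>> from scipy import stats
    >>> p = stats.binom_test(7, n=20, p=0.3)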
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1]+x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1+1e-7
if (x == p*n):
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif (x < p*n):
i = np.arange(np.ceil(p*n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n-y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g)-1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
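    Examples
    --------
    A minimal illustrative sketch with three simulated samples (output
    depends on the random draws):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = np.random.normal(scale=1.0, size=30)
    >>> b = np.random.normal(scale=2.0, size=35)
    >>> c = np.random.normal(scale=2.0, size=40)
    >>> Xsq, p = stats.fligner(a, b, c)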
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean','median','trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks/(2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni*(asarray(Aibar) - anbar)**2.0, axis=0)/varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis: int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
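    Examples
    --------
    A minimal illustrative sketch for paired "before/after" measurements
    (the p-value depends on the simulated data):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> before = np.random.normal(loc=10.0, size=30)
    >>> after = before + np.random.normal(loc=0.5, size=30)
    >>> T, p = stats.wilcoxon(before, after)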
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' \
or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x-y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count*(count + 1.) * 0.25
se = count*(count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
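# For reference, `_hermnorm(3)` yields the polynomials [1, -x, x**2 - 1].
# `pdf_fromgamma` below carries no docstring; judging from the code, it
# appears to build an approximate probability density function from the
# cumulant-based shape parameters g1..g4 via a Gram-Charlier/Edgeworth-type
# expansion in the Hermite polynomials returned by `_hermnorm`.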
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3*g2*g2
sigsq = 1.0/g2
sig = sqrt(sigsq)
mu = g1*sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] = p12[k]/sig**k
# Add all of the terms to polynomial
totp = p12[0] - (g1/6.0*p12[3]) + \
(g2/24.0*p12[4] + g1*g1/72.0*p12[6]) - \
(g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \
(g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] +
g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi)/sig
def thefunc(x):
xn = (x-mu)/sig
return totp(xn)*exp(-xn*xn/2.0)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high-low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
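    Examples
    --------
    A minimal illustrative sketch: two angles just either side of zero
    average to a value near zero (or equivalently near ``2*pi``), rather
    than to the arithmetic mean ``pi``:
    >>> import numpy as np
    >>> from scipy.stats import circmean
    >>> m = circmean([0.1, 2*np.pi - 0.1])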
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j*ang), axis=axis))
mask = res < 0
if (mask.ndim > 0):
res[mask] += 2*pi
elif mask:
res = res + 2*pi
return res*(high-low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| mit |
Djabbz/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
ilo10/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
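        Examples
        --------
        A minimal illustrative sketch using ``DecisionTreeClassifier`` on a
        toy dataset (the leaf indices depend on the fitted tree, so no
        output is reproduced here):
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0)
        >>> clf = clf.fit([[0.], [1.], [2.], [3.]], [0, 0, 1, 1])
        >>> leaf_ids = clf.apply([[0.5], [2.5]])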
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
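    # Annotation (not part of the original source): for a single-output tree a
    # leaf holding 3 training samples of class 0 and 1 sample of class 1 yields
    # predict_proba == [0.75, 0.25] for every test sample routed to that leaf;
    # the normalizer above rescales the per-leaf class counts returned by
    # ``tree_.predict`` so that each row sums to 1.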
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
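# -----------------------------------------------------------------------------
# Illustrative usage sketch (annotation, not part of the original module).
# It only exercises the public estimators defined above; the dataset and the
# parameter values are arbitrary choices for demonstration.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)

    # Per-class probabilities are per-leaf class fractions (see predict_proba).
    print(clf.predict_proba(iris.data[:5]))
    # Impurity-based importances, normalized to sum to 1.
    print(clf.feature_importances_)
    # Index of the leaf each sample ends up in (see apply).
    print(clf.apply(iris.data[:5]))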
| bsd-3-clause |
dsquareindia/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 45 | 5269 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
The 1st principal component in the unscaled set can be seen. It can be seen
that feature #13 dominates the direction, being a whole two orders of
magnitude above the other features. This is contrasted when observing
the principal component for the scaled version of the data. In the scaled
version, the orders of magnitude are roughly the same across all the features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (i.e. alcohol content and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed wherein the dataset
which is scaled before PCA vastly outperforms the unscaled version.
"""
from __future__ import print_function
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <[email protected]>
# Sebastian Raschka <[email protected]>
# License: BSD 3 clause
RANDOM_STATE = 42
FIG_SIZE = (10, 7)
features, target = load_wine(return_X_y=True)
# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
test_size=0.30,
random_state=RANDOM_STATE)
# Fit to data and predict using pipelined GNB and PCA.
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)
# Fit to data and predict using pipelined scaling, GNB and PCA.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)
# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))
print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))
# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']
# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])
# Scale and use PCA on X_train data for visualization.
scaler = std_clf.named_steps['standardscaler']
X_train_std = pca_std.transform(scaler.transform(X_train))
# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax1.scatter(X_train[y_train == l, 0], X_train[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax2.scatter(X_train_std[y_train == l, 0], X_train_std[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')
for ax in (ax1, ax2):
ax.set_xlabel('1st principal component')
ax.set_ylabel('2nd principal component')
ax.legend(loc='upper right')
ax.grid()
plt.tight_layout()
plt.show()
| bsd-3-clause |
lfairchild/PmagPy | programs/deprecated/biplot_magic.py | 2 | 8402 | #!/usr/bin/env python
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
"""
NAME
biplot_magic.py
DESCRIPTION
makes a biplot of specified variables from magic_measurements.txt format file
SYNTAX
biplot_magic.py [-h] [-i] [command line options]
INPUT
    takes a MagIC formatted magic_measurements file
OPTIONS
-h prints help message and quits
-i interactively set filename and axes for plotting
-f FILE: specifies file name, default: magic_measurements.txt
-fmt [svg,png,jpg], format for images - default is svg
-sav figure and quit
-x XMETH:key:step, specify method code for X axis (optional key and treatment values)
    -y YMETH:key:step, specify method code for Y axis
-obj OBJ: specify object [loc, sit, sam, spc] for plot, default is whole file
-n [V,M] plot volume or mass normalized data only
NOTES
if nothing is specified for x and y, the user will be presented with options
key = ['treatment_ac_field','treatment_dc_field',treatment_temp']
step in mT for fields, K for temperatures
"""
#
file='magic_measurements.txt'
methx,methy,fmt="","",'.svg'
plot_key=''
norm_by=""
#plot=0
no_plot = pmag.get_flag_arg_from_sys('-sav')
if not no_plot:
do_plot = True
else:
do_plot = False
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt='.'+sys.argv[ind+1]
if '-n' in sys.argv:
ind=sys.argv.index('-n')
norm_by=sys.argv[ind+1]
xtreat_key,ytreat_key,xstep,ystep="","","",""
if '-x' in sys.argv:
ind=sys.argv.index('-x')
meths=sys.argv[ind+1].split(':')
methx=meths[0]
if len(meths)>1:
xtreat_key=meths[1]
xstep=float(meths[2])
if '-y' in sys.argv:
ind=sys.argv.index('-y')
meths=sys.argv[ind+1].split(':')
methy=meths[0]
if len(meths)>1:
ytreat_key=meths[1]
ystep=float(meths[2])
if '-obj' in sys.argv:
ind=sys.argv.index('-obj')
plot_by=sys.argv[ind+1]
if plot_by=='loc':plot_key='er_location_name'
if plot_by=='sit':plot_key='er_site_name'
if plot_by=='sam':plot_key='er_sample_name'
if plot_by=='spc':plot_key='er_specimen_name'
if '-h' in sys.argv:
do_plot = False
if '-i' in sys.argv:
#
# get name of file from command line
#
file=input("Input magic_measurments file name? [magic_measurements.txt] ")
if file=="":file="magic_measurements.txt"
#
#
FIG={'fig':1}
pmagplotlib.plot_init(FIG['fig'],5,5)
data,file_type=pmag.magic_read(file)
if file_type!="magic_measurements":
        print(file_type,' not correct format for magic_measurements file')
sys.exit()
#
# collect method codes
methods,plotlist=[],[]
for rec in data:
if plot_key!="":
if rec[plot_key] not in plotlist:plotlist.append(rec[plot_key])
elif len(plotlist)==0:
plotlist.append('All')
meths=rec['magic_method_codes'].split(':')
for meth in meths:
if meth.strip() not in methods and meth.strip()!="LP-":
methods.append(meth.strip())
#
if '-i' in sys.argv:
print(methods)
elif methx =="" or methy=="":
print(methods)
sys.exit()
GoOn=1
while GoOn==1:
if '-i' in sys.argv:methx=input('Select method for x axis: ')
if methx not in methods:
if '-i' in sys.argv:
print('try again! method not available')
else:
print(main.__doc__)
print('\n must specify X axis method\n')
sys.exit()
else:
if pmagplotlib.verbose: print(methx, ' selected for X axis')
GoOn=0
GoOn=1
while GoOn==1:
if '-i' in sys.argv:methy=input('Select method for y axis: ')
if methy not in methods:
if '-i' in sys.argv:
print('try again! method not available')
else:
print(main.__doc__)
print('\n must specify Y axis method\n')
sys.exit()
else:
if pmagplotlib.verbose: print(methy, ' selected for Y axis')
GoOn=0
if norm_by=="":
measkeys=['measurement_magn_mass','measurement_magn_volume','measurement_magn_moment','measurement_magnitude','measurement_chi_volume','measurement_chi_mass','measurement_chi']
elif norm_by=="V":
measkeys=['measurement_magn_volume','measurement_chi_volume']
elif norm_by=="M":
measkeys=['measurement_magn_mass','measurement_chi_mass']
xmeaskey,ymeaskey="",""
plotlist.sort()
for plot in plotlist: # go through objects
if pmagplotlib.verbose:
print(plot)
X,Y=[],[]
x,y='',''
for rec in data:
if plot_key!="" and rec[plot_key]!=plot:
pass
else:
meths=rec['magic_method_codes'].split(':')
for meth in meths:
if meth.strip()==methx:
if xmeaskey=="":
for key in measkeys:
if key in list(rec.keys()) and rec[key]!="":
xmeaskey=key
if pmagplotlib.verbose:
print(xmeaskey,' being used for plotting X.')
break
if meth.strip()==methy:
if ymeaskey=="":
for key in measkeys:
if key in list(rec.keys()) and rec[key]!="":
ymeaskey=key
if pmagplotlib.verbose:
print(ymeaskey,' being used for plotting Y')
break
if ymeaskey!="" and xmeaskey!="":
for rec in data:
x,y='',''
spec=rec['er_specimen_name'] # get the ydata for this specimen
if rec[ymeaskey]!="" and methy in rec['magic_method_codes'].split(':'):
if ytreat_key=="" or (ytreat_key in list(rec.keys()) and float(rec[ytreat_key])==ystep):
y=float(rec[ymeaskey])
for rec in data: # now find the xdata
if rec['er_specimen_name']==spec and rec[xmeaskey]!="" and methx in rec['magic_method_codes'].split(':'):
if xtreat_key=="" or (xtreat_key in list(rec.keys()) and float(rec[xtreat_key])==xstep):
x=float(rec[xmeaskey])
if x != '' and y!= '':
X.append(x)
Y.append(y)
if len(X)>0:
pmagplotlib.clearFIG(FIG['fig'])
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=methx,ylab=methy,title=plot+':Biplot')
if not pmagplotlib.isServer and do_plot:
pmagplotlib.draw_figs(FIG)
ans=input('S[a]ve plots, [q]uit, Return for next plot ' )
if ans=='a':
files={}
for key in list(FIG.keys()): files[key]=plot+'_'+key+fmt
pmagplotlib.save_plots(FIG,files)
if ans=='q':
print("Good-bye\n")
sys.exit()
else:
files={}
for key in list(FIG.keys()): files[key]=plot+'_'+key+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['fig']='X Y Plot'
FIG = pmagplotlib.add_borders(FIG,titles,black,purple)
pmagplotlib.save_plots(FIG,files)
else:
print('nothing to plot for ',plot)
if __name__ == "__main__":
main()
| bsd-3-clause |
ky822/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
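# Annotation: sparsify() converts ``coef_`` to a scipy sparse (CSR) matrix, so
# subsequent predict() calls use sparse dot products; with a mostly-zero
# coefficient vector this is what makes the sparse benchmark below faster.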
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/examples/learn/iris_custom_decay_dnn.py | 30 | 2039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
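# Annotation (not part of the original example): with the values above the
# learning rate decays continuously as 0.1 * 0.001 ** (global_step / 100),
# i.e. it shrinks smoothly from 0.1 as training proceeds.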
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| mit |
christophreimer/pytesmo | pytesmo/validation_framework/temporal_matchers.py | 1 | 3571 | # Copyright (c) 2013,Vienna University of Technology, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 24, 2013
@author: [email protected]
'''
import pytesmo.temporal_matching as temp_match
import pandas as pd
class BasicTemporalMatching(object):
"""
Temporal matching object
Parameters
----------
window : float
window size to use for temporal matching. A match in other will only
be found if it is +- window size days away from a point in reference
"""
def __init__(self, window=0.5, reverse=False):
self.window = window
if reverse:
self.match = self.match_reverse
def match(self, reference, other):
"""
        takes reference and other dataframe and returns a joined DataFrame
in this case the reference dataset for the grid is also the
temporal reference dataset
"""
# temporal match comparison to reference TimeSeries
try:
matched_other = temp_match.df_match(reference, other,
window=self.window, dropna=True)
except ValueError:
return pd.DataFrame()
matched_other = matched_other.drop(['distance', 'index'], axis=1)
return matched_other.join(reference)
def match_reverse(self, reference, other):
"""
        takes reference and other dataframe and returns a joined DataFrame
in this case the reference dataset for the grid is also the
temporal reference dataset
"""
# temporal match comparison to reference TimeSeries
try:
matched_ref = temp_match.df_match(other, reference,
window=self.window, dropna=True)
except ValueError:
return pd.DataFrame()
matched_ref = matched_ref.drop(['distance', 'index'], axis=1)
return matched_ref.join(other)
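# -----------------------------------------------------------------------------
# Minimal usage sketch (annotation, not part of the original module). The time
# series below are synthetic and only illustrate how the matcher joins an
# "other" series onto the reference index within +/- ``window`` days.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    ref_index = pd.date_range("2015-01-01", periods=5, freq="D")
    other_index = ref_index + pd.Timedelta(hours=3)
    reference = pd.DataFrame({"ref": np.arange(5.)}, index=ref_index)
    other = pd.DataFrame({"other": np.arange(5.) * 10.}, index=other_index)

    matcher = BasicTemporalMatching(window=0.5)
    print(matcher.match(reference, other))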
| bsd-3-clause |
awanke/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class which lets you build your Donut charts just by passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
    Donut charts in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
Takes reference points from data loaded at the ColumnDataSurce.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
sdsc/xsede_stats | tacc_stats/analysis/plot/metadatarate.py | 2 | 2047 | from plots import Plot
from matplotlib.figure import Figure
from tacc_stats.analysis.gen import tspl_utils
import numpy
class MetaDataRatePlot(Plot):
k1=['llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', ]
k2=['open','close','mmap','fsync','setattr',
'truncate','flock','getattr','statfs','alloc_inode',
'setxattr',' listxattr',
'removexattr', 'readdir',
'create','lookup','link','unlink','symlink','mkdir',
'rmdir','mknod','rename',]
def plot(self,jobid,job_data=None):
self.setup(jobid,job_data=job_data)
ts = self.ts
self.fig = Figure(figsize=(10,8),dpi=80)
self.ax=self.fig.add_subplot(1,1,1)
self.ax=[self.ax]
self.fig.subplots_adjust(hspace=0.35)
markers = ('o','x','+','^','s','8','p',
'h','*','D','<','>','v','d','.')
colors = ('b','g','r','c','m','k','y')
tmid=(self.ts.t[:-1]+self.ts.t[1:])/2.0
cnt=0
for v in ts.data:
for host in v:
for vals in v[host]:
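          # Annotation: the raw llite values are cumulative counts, so
          # differencing consecutive samples and dividing by the elapsed
          # time yields a rate in operations per second.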
rate=numpy.diff(vals)/numpy.diff(ts.t)
c=colors[cnt % len(colors)]
m=markers[cnt % len(markers)]
self.ax[0].plot(tmid/3600., rate, marker=m,
markeredgecolor=c, linestyle='-', color=c,
markerfacecolor='None', label=self.k2[cnt])
self.ax[0].hold=True
cnt=cnt+1
self.ax[0].set_ylabel('Meta Data Rate (op/s)')
tspl_utils.adjust_yaxis_range(self.ax[0],0.1)
handles,labels=self.ax[0].get_legend_handles_labels()
new_handles={}
for h,l in zip(handles,labels):
new_handles[l]=h
box = self.ax[0].get_position()
self.ax[0].set_position([box.x0, box.y0, box.width * 0.9, box.height])
self.ax[0].legend(new_handles.values(),new_handles.keys(),prop={'size':8},
bbox_to_anchor=(1.05,1), borderaxespad=0., loc=2)
self.output('metadata')
| lgpl-2.1 |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py | 70 | 14387 | """
A module providing some utility functions regarding bezier path manipulation.
"""
import numpy as np
from math import sqrt
from matplotlib.path import Path
from operator import xor
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a*d-b*c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_* line1_rhs + b_ * line2_rhs
y = c_* line1_rhs + d_ * line2_rhs
return x, y
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*,
    return the locations of the two points located along its perpendicular line at a distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1-t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
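# Annotation (not part of the original module): for the quadratic segment with
# control points [(0, 0), (1, 1), (2, 0)], split_de_casteljau(beta, 0.5) returns
# left = [(0, 0), (0.5, 0.5), (1.0, 0.5)] and right = [(1.0, 0.5), (1.5, 0.5), (2, 0)];
# the two halves join at the point of the original curve at t = 0.5.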
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise ValueError("the segment does not seemed to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2:
return t0, t1
# calculate the middle point
middle_t = 0.5*(t0+t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
class BezierSegment:
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier lines can be supported by simply adding
    # corresponding values.
_binom_coeff = {1:np.array([1., 1.]),
2:np.array([1., 2., 1.]),
3:np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <=
        n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:,0]
yy = _control_points[:,1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1.-t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0+t1)/2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
    which it intersects with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t*r + cx, sin_t*r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax, tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = path_iter.next()
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold=0
i = 1
for ctl_points, command in path_iter:
iold=i
i += len(ctl_points)/2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = zip(bezier_path[::2], bezier_path[1::2])
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r**2
def _f(xy):
x, y = xy
return (x-cx)**2 + (y-cy)**2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1-x0, y1-y0
d = (dx*dx + dy*dy)**.5
return dx/d, dy/d
def get_parallels(bezier2, width):
"""
    Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    #  c1 and c2 are control points representing the beginning and end of the bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c2.
    # They are also the angles of the tangent lines of the path at c1 and c2
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]
return path_left, path_right
def make_wedged_bezier2(bezier2, length, shrink_factor=0.5):
"""
    Being similar to get_parallels, returns
    control points of two quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
xx1, yy1 = bezier2[2]
xx2, yy2 = bezier2[1]
xx3, yy3 = bezier2[0]
cx, cy = xx3, yy3
x0, y0 = xx2, yy2
dist = sqrt((x0-cx)**2 + (y0-cy)**2)
cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,
x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)
xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2.,
xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2.,
dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2)
cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,
xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)
l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]
l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]
return l_plus, l_minus
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.
"""
cmx = .5 * (4*mmx - (c1x + c2x))
cmy = .5 * (4*mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
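# Note (annotation): the make_wedged_bezier2 definition below shadows the one
# defined earlier in this module; at import time only this later version
# remains bound to the name.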
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Being similar to get_parallels, returns
    control points of two quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c3.
    # They are also the angles of the tangent lines of the path at c1 and c3
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23
c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5
c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5
c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
if 0:
path = Path([(0, 0), (1, 0), (2, 2)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
left, right = divide_path_inout(path, inside)
clf()
ax = gca()
| agpl-3.0 |
waynenilsen/statsmodels | statsmodels/genmod/cov_struct.py | 19 | 46892 | from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
"""
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
class CovStruct(object):
"""
A base class for correlation and covariance structures of grouped
data.
Each implementation of this class takes the residuals from a
regression model that has been fitted to grouped data, and uses
them to estimate the within-group dependence structure of the
random errors in the model.
The state of the covariance structure is represented through the
value of the class variable `dep_params`. The default state of a
newly-created instance should correspond to the identity
correlation matrix.
"""
def __init__(self, cov_nearest_method="clipped"):
# Parameters describing the dependency structure
self.dep_params = None
# Keep track of the number of times that the covariance was
# adjusted.
self.cov_adjust = []
# Method for projecting the covariance matrix if it not SPD.
self.cov_nearest_method = cov_nearest_method
def initialize(self, model):
"""
Called by GEE, used by implementations that need additional
setup prior to running `fit`.
Parameters
----------
model : GEE class
A reference to the parent GEE class instance.
"""
self.model = model
def update(self, params):
"""
Updates the association parameter values based on the current
regression coefficients.
Parameters
----------
params : array-like
Working values for the regression parameters.
"""
raise NotImplementedError
def covariance_matrix(self, endog_expval, index):
"""
Returns the working covariance or correlation matrix for a
given cluster of data.
Parameters
----------
endog_expval: array-like
The expected values of endog for the cluster for which the
covariance or correlation matrix will be returned
index: integer
            The index of the cluster for which the covariance or
correlation matrix will be returned
Returns
-------
M: matrix
The covariance or correlation matrix of endog
is_cor: bool
True if M is a correlation matrix, False if M is a
covariance matrix
"""
raise NotImplementedError
def covariance_matrix_solve(self, expval, index, stdev, rhs):
"""
Solves matrix equations of the form `covmat * soln = rhs` and
returns the values of `soln`, where `covmat` is the covariance
matrix represented by this class.
Parameters
----------
expval: array-like
The expected value of endog for each observed value in the
group.
index: integer
The group index.
stdev : array-like
The standard deviation of endog for each observation in
the group.
rhs : list/tuple of array-like
A set of right-hand sides; each defines a matrix equation
to be solved.
Returns
-------
soln : list/tuple of array-like
The solutions to the matrix equations.
Notes
-----
Returns None if the solver fails.
Some dependence structures do not use `expval` and/or `index`
to determine the correlation matrix. Some families
(e.g. binomial) do not use the `stdev` parameter when forming
the covariance matrix.
If the covariance matrix is singular or not SPD, it is
projected to the nearest such matrix. These projection events
are recorded in the fit_history member of the GEE model.
Systems of linear equations with the covariance matrix as the
left hand side (LHS) are solved for different right hand sides
(RHS); the LHS is only factorized once to save time.
This is a default implementation, it can be reimplemented in
subclasses to optimize the linear algebra according to the
        structure of the covariance matrix.
"""
vmat, is_cor = self.covariance_matrix(expval, index)
if is_cor:
vmat *= np.outer(stdev, stdev)
# Factor the covariance matrix. If the factorization fails,
# attempt to condition it into a factorizable matrix.
threshold = 1e-2
success = False
cov_adjust = 0
for itr in range(20):
try:
vco = spl.cho_factor(vmat)
success = True
break
except np.linalg.LinAlgError:
vmat = cov_nearest(vmat, method=self.cov_nearest_method,
threshold=threshold)
threshold *= 2
cov_adjust += 1
self.cov_adjust.append(cov_adjust)
# Last resort if we still can't factor the covariance matrix.
        if not success:
warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest",
ConvergenceWarning)
vmat = np.diag(np.diag(vmat))
vco = spl.cho_factor(vmat)
soln = [spl.cho_solve(vco, x) for x in rhs]
return soln
def summary(self):
"""
Returns a text summary of the current estimate of the
dependence structure.
"""
raise NotImplementedError
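# Illustrative sketch (not part of statsmodels): the smallest useful subclass
# of CovStruct only needs `update`, `covariance_matrix` and `summary`; the
# default `covariance_matrix_solve` above is then used automatically.  The
# fixed correlation value below is hypothetical and nothing is estimated.
class _FixedExchangeable(CovStruct):
    """Working correlation fixed at a user-supplied value (no estimation)."""
    def __init__(self, rho=0.2):
        super(_FixedExchangeable, self).__init__()
        self.dep_params = rho
    def update(self, params):
        # Nothing to estimate; the correlation stays at its fixed value.
        pass
    def covariance_matrix(self, expval, index):
        dim = len(expval)
        mat = self.dep_params * np.ones((dim, dim), dtype=np.float64)
        np.fill_diagonal(mat, 1.)
        return mat, True  # True: this is a correlation matrix
    def summary(self):
        return "Fixed exchangeable correlation: %.3f" % self.dep_params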
class Independence(CovStruct):
"""
An independence working dependence structure.
"""
# Nothing to update
def update(self, params):
return
def covariance_matrix(self, expval, index):
dim = len(expval)
return np.eye(dim, dtype=np.float64), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
v = stdev**2
rslt = []
for x in rhs:
if x.ndim == 1:
rslt.append(x / v)
else:
rslt.append(x / v[:, None])
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return "Observations within a cluster are modeled as being independent."
class Exchangeable(CovStruct):
"""
An exchangeable working dependence structure.
"""
def __init__(self):
super(Exchangeable, self).__init__()
# The correlation between any two values in the same cluster
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
varfunc = self.model.family.variance
cached_means = self.model.cached_means
has_weights = self.model.weights is not None
weights_li = self.model.weights
residsq_sum, scale = 0, 0
fsum1, fsum2, n_pairs = 0., 0., 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
f = weights_li[i] if has_weights else 1.
ngrp = len(resid)
residsq = np.outer(resid, resid)
scale += f * np.trace(residsq)
fsum1 += f * len(endog[i])
residsq = np.tril(residsq, -1)
residsq_sum += f * residsq.sum()
npr = 0.5 * ngrp * (ngrp - 1)
fsum2 += f * npr
n_pairs += npr
ddof = self.model.ddof_scale
scale /= (fsum1 * (nobs - ddof) / float(nobs))
residsq_sum /= scale
self.dep_params = residsq_sum / (fsum2 * (n_pairs - ddof) / float(n_pairs))
def covariance_matrix(self, expval, index):
dim = len(expval)
dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
np.fill_diagonal(dp, 1)
return dp, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
k = len(expval)
c = self.dep_params / (1. - self.dep_params)
c /= 1. + self.dep_params * (k - 1)
rslt = []
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
y = x1 / (1. - self.dep_params)
y -= c * sum(x1)
y /= stdev
else:
x1 = x / stdev[:, None]
y = x1 / (1. - self.dep_params)
y -= c * x1.sum(0)
y /= stdev[:, None]
rslt.append(y)
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("The correlation between two observations in the " +
"same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
"""
A nested working dependence structure.
A working dependence structure that captures a nested hierarchy of
groups, each level of which contributes to the random error term
of the model.
When using this working covariance structure, `dep_data` of the
GEE instance should contain a n_obs x k matrix of 0/1 indicators,
corresponding to the k subgroups nested under the top-level
`groups` of the GEE instance. These subgroups should be nested
from left to right, so that two observations with the same value
for column j of `dep_data` should also have the same value for all
columns j' < j (this only applies to observations in the same
top-level cluster given by the `groups` argument to GEE).
Examples
--------
Suppose our data are student test scores, and the students are in
classrooms, nested in schools, nested in school districts. The
school district is the highest level of grouping, so the school
district id would be provided to GEE as `groups`, and the school
and classroom id's would be provided to the Nested class as the
`dep_data` argument, e.g.
0 0 # School 0, classroom 0, student 0
0 0 # School 0, classroom 0, student 1
0 1 # School 0, classroom 1, student 0
0 1 # School 0, classroom 1, student 1
1 0 # School 1, classroom 0, student 0
1 0 # School 1, classroom 0, student 1
1 1 # School 1, classroom 1, student 0
1 1 # School 1, classroom 1, student 1
Labels lower in the hierarchy are recycled, so that student 0 in
    classroom 0 is different from student 0 in classroom 1, etc.
Notes
-----
The calculations for this dependence structure involve all pairs
of observations within a group (that is, within the top level
`group` structure passed to GEE). Large group sizes will result
in slow iterations.
The variance components are estimated using least squares
regression of the products r*r', for standardized residuals r and
r' in the same group, on a vector of indicators defining which
variance components are shared by r and r'.
"""
def initialize(self, model):
"""
Called on the first call to update
`ilabels` is a list of n_i x n_i matrices containing integer
labels that correspond to specific correlation parameters.
Two elements of ilabels[i] with the same label share identical
variance components.
`designx` is a matrix, with each row containing dummy
variables indicating which variance components are associated
with the corresponding element of QY.
"""
super(Nested, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for nested cov_struct, using unweighted covariance estimate")
# A bit of processing of the nest data
id_matrix = np.asarray(self.model.dep_data)
if id_matrix.ndim == 1:
id_matrix = id_matrix[:,None]
self.id_matrix = id_matrix
endog = self.model.endog_li
designx, ilabels = [], []
# The number of layers of nesting
n_nest = self.id_matrix.shape[1]
for i in range(self.model.num_group):
ngrp = len(endog[i])
glab = self.model.group_labels[i]
rix = self.model.group_indices[glab]
# Determine the number of common variance components
# shared by each pair of observations.
ix1, ix2 = np.tril_indices(ngrp, -1)
ncm = (self.id_matrix[rix[ix1], :] ==
self.id_matrix[rix[ix2], :]).sum(1)
# This is used to construct the working correlation
# matrix.
ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
ilabel[[ix1, ix2]] = ncm + 1
ilabel[[ix2, ix1]] = ncm + 1
ilabels.append(ilabel)
# This is used to estimate the variance components.
dsx = np.zeros((len(ix1), n_nest+1), dtype=np.float64)
dsx[:,0] = 1
for k in np.unique(ncm):
ii = np.flatnonzero(ncm == k)
dsx[ii, 1:k+1] = 1
designx.append(dsx)
self.designx = np.concatenate(designx, axis=0)
self.ilabels = ilabels
svd = np.linalg.svd(self.designx, 0)
self.designx_u = svd[0]
self.designx_s = svd[1]
self.designx_v = svd[2].T
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
dim = len(params)
if self.designx is None:
self._compute_design(self.model)
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dvmat = []
scale = 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
ix1, ix2 = np.tril_indices(len(resid), -1)
dvmat.append(resid[ix1] * resid[ix2])
scale += np.sum(resid**2)
dvmat = np.concatenate(dvmat)
scale /= (nobs - dim)
# Use least squares regression to estimate the variance
# components
vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
dvmat) / self.designx_s)
self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
self.scale = scale
self.dep_params = self.vcomp_coeff.copy()
def covariance_matrix(self, expval, index):
dim = len(expval)
# First iteration
if self.dep_params is None:
return np.eye(dim, dtype=np.float64), True
ilabel = self.ilabels[index]
c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
vmat = c[ilabel]
vmat /= self.scale
return vmat, True
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
"""
Returns a summary string describing the state of the
dependence structure.
"""
msg = "Variance estimates\n------------------\n"
for k in range(len(self.vcomp_coeff)):
msg += "Component %d: %.3f\n" % (k+1, self.vcomp_coeff[k])
msg += "Residual: %.3f\n" % (self.scale -
np.sum(self.vcomp_coeff))
return msg
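# Illustrative sketch (not part of statsmodels): building the `dep_data`
# matrix for the school/classroom example above and fitting a GEE with a
# nested working covariance.  The data are synthetic, the import path assumes
# the public statsmodels API (statsmodels.api.GEE and its `dep_data`
# argument), and classroom labels are made unique within each district so the
# left-to-right nesting requirement holds.
def _nested_example(n_districts=10, n_schools=3, n_classrooms=2,
                    n_students=4, seed=0):
    import statsmodels.api as sm
    rng = np.random.RandomState(seed)
    per_district = n_schools * n_classrooms * n_students
    n = n_districts * per_district
    groups = np.repeat(np.arange(n_districts), per_district)
    school = np.tile(np.repeat(np.arange(n_schools),
                               n_classrooms * n_students), n_districts)
    # Classroom labels run over school x classroom so that equal values in the
    # classroom column imply equal values in the school column.
    classroom = np.tile(np.repeat(np.arange(n_schools * n_classrooms),
                                  n_students), n_districts)
    dep_data = np.column_stack((school, classroom))
    exog = sm.add_constant(rng.normal(size=(n, 2)))
    endog = rng.normal(size=n)
    model = sm.GEE(endog, exog, groups=groups, cov_struct=Nested(),
                   dep_data=dep_data)
    return model.fit()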
class Stationary(CovStruct):
"""
A stationary covariance structure.
The correlation between two observations is an arbitrary function
of the distance between them. Distances up to a given maximum
value are included in the covariance model.
Parameters
----------
max_lag : float
The largest distance that is included in the covariance model.
grid : bool
If True, the index positions in the data (after dropping missing
values) are used to define distances, and the `time` variable is
ignored.
"""
def __init__(self, max_lag=1, grid=False):
super(Stationary, self).__init__()
self.max_lag = max_lag
self.grid = grid
self.dep_params = np.zeros(max_lag)
def initialize(self, model):
super(Stationary, self).initialize(model)
# Time used as an index needs to be integer type.
        if not self.grid:
time = self.model.time[:, 0].astype(np.int32)
self.time = self.model.cluster_list(time)
def update(self, params):
if self.grid:
self.update_grid(params)
else:
self.update_nogrid(params)
def update_grid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
dep_params[0] += np.sum(resid * resid) / len(resid)
for j in range(1, self.max_lag + 1):
dep_params[j] += np.sum(resid[0:-j] * resid[j:]) / len(resid[j:])
self.dep_params = dep_params[1:] / dep_params[0]
def update_nogrid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
dn = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
j1, j2 = np.tril_indices(len(expval))
dx = np.abs(self.time[i][j1] - self.time[i][j2])
ii = np.flatnonzero(dx <= self.max_lag)
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
vs = np.bincount(dx, weights=resid[j1] * resid[j2], minlength=self.max_lag+1)
vd = np.bincount(dx, minlength=self.max_lag+1)
ii = np.flatnonzero(vd > 0)
dn[ii] += 1
if len(ii) > 0:
dep_params[ii] += vs[ii] / vd[ii]
dep_params /= dn
self.dep_params = dep_params[1:] / dep_params[0]
def covariance_matrix(self, endog_expval, index):
if self.grid:
            return self.covariance_matrix_grid(endog_expval, index)
j1, j2 = np.tril_indices(len(endog_expval))
dx = np.abs(self.time[index][j1] - self.time[index][j2])
ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
cmat = np.eye(len(endog_expval))
cmat[j1, j2] = self.dep_params[dx - 1]
cmat[j2, j1] = self.dep_params[dx - 1]
return cmat, True
def covariance_matrix_grid(self, endog_expval, index):
from scipy.linalg import toeplitz
r = np.zeros(len(endog_expval))
r[0] = 1
r[1:self.max_lag + 1] = self.dep_params
return toeplitz(r), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
        if not self.grid:
return super(Stationary, self).covariance_matrix_solve(expval, index, stdev, rhs)
from statsmodels.tools.linalg import stationary_solve
r = np.zeros(len(expval))
r[0:self.max_lag] = self.dep_params
return [stationary_solve(r, x) for x in rhs]
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
        return ("Stationary dependence parameters\n%s" %
                self.dep_params)
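# Illustrative sketch (not part of statsmodels): with grid=True the working
# correlation is Toeplitz in the index distance, with `dep_params` filling the
# first `max_lag` off-diagonals.  The parameter values below are hypothetical.
def _stationary_grid_example(cluster_size=5):
    cs = Stationary(max_lag=2, grid=True)
    cs.dep_params = np.array([0.5, 0.25])
    # covariance_matrix_grid only uses the length of its first argument.
    cmat, is_cor = cs.covariance_matrix_grid(np.zeros(cluster_size), 0)
    return cmat, is_cor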
class Autoregressive(CovStruct):
"""
A first-order autoregressive working dependence structure.
The dependence is defined in terms of the `time` component of the
parent GEE class, which defaults to the index position of each
value within its cluster, based on the order of values in the
input data set. Time represents a potentially multidimensional
index from which distances between pairs of observations can be
determined.
The correlation between two observations in the same cluster is
dep_params^distance, where `dep_params` contains the (scalar)
autocorrelation parameter to be estimated, and `distance` is the
distance between the two observations, calculated from their
corresponding time values. `time` is stored as an n_obs x k
matrix, where `k` represents the number of dimensions in the time
index.
The autocorrelation parameter is estimated using weighted
nonlinear least squares, regressing each value within a cluster on
    each preceding value in the same cluster.
Parameters
----------
dist_func: function from R^k x R^k to R^+, optional
A function that computes the distance between the two
observations based on their `time` values.
References
----------
B Rosner, A Munoz. Autoregressive modeling for the analysis of
longitudinal data with unequally spaced examinations. Statistics
in medicine. Vol 7, 59-71, 1988.
"""
def __init__(self, dist_func=None):
super(Autoregressive, self).__init__()
# The function for determining distances based on time
if dist_func is None:
self.dist_func = lambda x, y: np.abs(x - y).sum()
else:
self.dist_func = dist_func
self.designx = None
# The autocorrelation parameter
self.dep_params = 0.
def update(self, params):
if self.model.weights is not None:
warnings.warn("weights not implemented for autoregressive cov_struct, using unweighted covariance estimate")
endog = self.model.endog_li
time = self.model.time_li
# Only need to compute this once
if self.designx is not None:
designx = self.designx
else:
designx = []
for i in range(self.model.num_group):
ngrp = len(endog[i])
if ngrp == 0:
continue
# Loop over pairs of observations within a cluster
for j1 in range(ngrp):
for j2 in range(j1):
designx.append(self.dist_func(time[i][j1, :],
time[i][j2, :]))
designx = np.array(designx)
self.designx = designx
scale = self.model.estimate_scale()
varfunc = self.model.family.variance
cached_means = self.model.cached_means
# Weights
var = 1. - self.dep_params**(2*designx)
var /= 1. - self.dep_params**2
wts = 1. / var
wts /= wts.sum()
residmat = []
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(scale * varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
for j1 in range(ngrp):
for j2 in range(j1):
residmat.append([resid[j1], resid[j2]])
residmat = np.array(residmat)
# Need to minimize this
def fitfunc(a):
dif = residmat[:, 0] - (a**designx)*residmat[:, 1]
return np.dot(dif**2, wts)
# Left bracket point
b_lft, f_lft = 0., fitfunc(0.)
# Center bracket point
b_ctr, f_ctr = 0.5, fitfunc(0.5)
while f_ctr > f_lft:
b_ctr /= 2
f_ctr = fitfunc(b_ctr)
if b_ctr < 1e-8:
self.dep_params = 0
return
# Right bracket point
b_rgt, f_rgt = 0.75, fitfunc(0.75)
while f_rgt < f_ctr:
b_rgt = b_rgt + (1. - b_rgt) / 2
f_rgt = fitfunc(b_rgt)
if b_rgt > 1. - 1e-6:
raise ValueError(
"Autoregressive: unable to find right bracket")
from scipy.optimize import brent
self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
def covariance_matrix(self, endog_expval, index):
ngrp = len(endog_expval)
if self.dep_params == 0:
return np.eye(ngrp, dtype=np.float64), True
idx = np.arange(ngrp)
cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :])
return cmat, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
# The inverse of an AR(1) covariance matrix is tri-diagonal.
k = len(expval)
soln = []
# LHS has 1 column
if k == 1:
return [x / stdev**2 for x in rhs]
# LHS has 2 columns
if k == 2:
mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
mat /= (1. - self.dep_params**2)
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
else:
x1 = x / stdev[:, None]
x1 = np.dot(mat, x1)
if x.ndim == 1:
x1 /= stdev
else:
x1 /= stdev[:, None]
soln.append(x1)
return soln
# LHS has >= 3 columns: values c0, c1, c2 defined below give
# the inverse. c0 is on the diagonal, except for the first
# and last position. c1 is on the first and last position of
# the diagonal. c2 is on the sub/super diagonal.
c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2)
c1 = 1. / (1. - self.dep_params**2)
c2 = -self.dep_params / (1. - self.dep_params**2)
soln = []
for x in rhs:
flatten = False
if x.ndim == 1:
x = x[:, None]
flatten = True
x1 = x / stdev[:, None]
z0 = np.zeros((1, x.shape[1]))
            # Apply the tridiagonal inverse of the AR(1) correlation to the
            # standardized right-hand side x1; the result is unstandardized
            # (divided by stdev) below.
            rhs1 = np.concatenate((x1[1:, :], z0), axis=0)
            rhs2 = np.concatenate((z0, x1[0:-1, :]), axis=0)
            y = c0*x1 + c2*rhs1 + c2*rhs2
            y[0, :] = c1*x1[0, :] + c2*x1[1, :]
            y[-1, :] = c1*x1[-1, :] + c2*x1[-2, :]
y /= stdev[:, None]
if flatten:
y = np.squeeze(y)
soln.append(y)
return soln
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Autoregressive(1) dependence parameter: %.3f\n" %
self.dep_params)
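# Illustrative check (not part of statsmodels): for an AR(1) working
# correlation R[i, j] = rho**|i - j|, the inverse is tridiagonal with the
# constants c0, c1 and c2 used in covariance_matrix_solve above.  The helper
# compares that solver with a dense solve on a small synthetic cluster; the
# sizes and seed are arbitrary.
def _check_ar1_solve(rho=0.4, k=5, seed=0):
    rng = np.random.RandomState(seed)
    stdev = rng.uniform(0.5, 2.0, size=k)
    x = rng.normal(size=k)
    idx = np.arange(k)
    r = rho ** np.abs(idx[:, None] - idx[None, :])
    # Dense working covariance V = D R D with D = diag(stdev).
    v = np.outer(stdev, stdev) * r
    cs = Autoregressive()
    cs.dep_params = rho
    # expval is only used for its length in this solver.
    soln = cs.covariance_matrix_solve(np.zeros(k), 0, stdev, [x])[0]
    return np.allclose(soln, np.linalg.solve(v, x))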
class CategoricalCovStruct(CovStruct):
"""
Parent class for covariance structure for categorical data models.
Attributes
----------
nlevel : int
The number of distinct levels for the outcome variable.
ibd : list
A list whose i^th element ibd[i] is an array whose rows
contain integer pairs (a,b), where endog_li[i][a:b] is the
subvector of binary indicators derived from the same ordinal
value.
"""
def initialize(self, model):
super(CategoricalCovStruct, self).initialize(model)
self.nlevel = len(model.endog_values)
self._ncut = self.nlevel - 1
from numpy.lib.stride_tricks import as_strided
b = np.dtype(np.int64).itemsize
ibd = []
for v in model.endog_li:
jj = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64)
jj = as_strided(jj, shape=(len(jj) - 1, 2), strides=(b, b))
ibd.append(jj)
self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
"""
Estimate the global odds ratio for a GEE with ordinal or nominal
data.
References
----------
PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
Ordinal Measurements". Journal of the American Statistical
Association Vol. 91, Issue 435 (1996).
Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
A Note on Working Correlation Structures. Biometrics Vol. 52,
No. 1 (Mar., 1996), pp. 354-361
http://www.jstor.org/stable/2533173
Notes
-----
The following data structures are calculated in the class:
'ibd' is a list whose i^th element ibd[i] is a sequence of integer
pairs (a,b), where endog_li[i][a:b] is the subvector of binary
indicators derived from the same ordinal value.
`cpp` is a dictionary where cpp[group] is a map from cut-point
pairs (c,c') to the indices of all between-subject pairs derived
from the given cut points.
"""
def __init__(self, endog_type):
super(GlobalOddsRatio, self).__init__()
self.endog_type = endog_type
self.dep_params = 0.
def initialize(self, model):
super(GlobalOddsRatio, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for GlobalOddsRatio cov_struct, using unweighted covariance estimate")
# Need to restrict to between-subject pairs
cpp = []
for v in model.endog_li:
# Number of subjects in this group
m = int(len(v) / self._ncut)
i1, i2 = np.tril_indices(m, -1)
cpp1 = {}
for k1 in range(self._ncut):
for k2 in range(k1+1):
jj = np.zeros((len(i1), 2), dtype=np.int64)
jj[:, 0] = i1*self._ncut + k1
jj[:, 1] = i2*self._ncut + k2
cpp1[(k2, k1)] = jj
cpp.append(cpp1)
self.cpp = cpp
# Initialize the dependence parameters
self.crude_or = self.observed_crude_oddsratio()
if self.model.update_dep:
self.dep_params = self.crude_or
def pooled_odds_ratio(self, tables):
"""
Returns the pooled odds ratio for a list of 2x2 tables.
The pooled odds ratio is the inverse variance weighted average
of the sample odds ratios of the tables.
"""
if len(tables) == 0:
return 1.
# Get the sampled odds ratios and variances
log_oddsratio, var = [], []
for table in tables:
lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
np.log(table[0, 1]) - np.log(table[1, 0])
log_oddsratio.append(lor)
var.append((1 / table.astype(np.float64)).sum())
# Calculate the inverse variance weighted average
wts = [1 / v for v in var]
wtsum = sum(wts)
wts = [w / wtsum for w in wts]
log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)])
return np.exp(log_pooled_or)
def covariance_matrix(self, expected_value, index):
vmat = self.get_eyy(expected_value, index)
vmat -= np.outer(expected_value, expected_value)
return vmat, False
def observed_crude_oddsratio(self):
"""
To obtain the crude (global) odds ratio, first pool all binary
indicators corresponding to a given pair of cut points (c,c'),
then calculate the odds ratio for this 2x2 table. The crude
odds ratio is the inverse variance weighted average of these
odds ratios. Since the covariate effects are ignored, this OR
will generally be greater than the stratified OR.
"""
cpp = self.cpp
endog = self.model.endog_li
# Storage for the contingency tables for each (c,c')
tables = {}
for ii in iterkeys(cpp[0]):
tables[ii] = np.zeros((2, 2), dtype=np.float64)
# Get the observed crude OR
for i in range(len(endog)):
# The observed joint values for the current cluster
yvec = endog[i]
endog_11 = np.outer(yvec, yvec)
endog_10 = np.outer(yvec, 1. - yvec)
endog_01 = np.outer(1. - yvec, yvec)
endog_00 = np.outer(1. - yvec, 1. - yvec)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()
return self.pooled_odds_ratio(list(itervalues(tables)))
def get_eyy(self, endog_expval, index):
"""
Returns a matrix V such that V[i,j] is the joint probability
that endog[i] = 1 and endog[j] = 1, based on the marginal
probabilities of endog and the global odds ratio `current_or`.
"""
current_or = self.dep_params
ibd = self.ibd[index]
# The between-observation joint probabilities
if current_or == 1.0:
vmat = np.outer(endog_expval, endog_expval)
else:
psum = endog_expval[:, None] + endog_expval[None, :]
pprod = endog_expval[:, None] * endog_expval[None, :]
pfac = np.sqrt((1. + psum * (current_or - 1.))**2 +
4 * current_or * (1. - current_or) * pprod)
vmat = 1. + psum * (current_or - 1.) - pfac
vmat /= 2. * (current_or - 1)
# Fix E[YY'] for elements that belong to same observation
for bdl in ibd:
evy = endog_expval[bdl[0]:bdl[1]]
if self.endog_type == "ordinal":
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(evy, evy)
else:
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)
return vmat
def update(self, params):
"""
Update the global odds ratio based on the current value of
params.
"""
endog = self.model.endog_li
cpp = self.cpp
cached_means = self.model.cached_means
# This will happen if all the clusters have only
# one observation
if len(cpp[0]) == 0:
return
tables = {}
for ii in cpp[0]:
tables[ii] = np.zeros((2, 2), dtype=np.float64)
for i in range(self.model.num_group):
endog_expval, _ = cached_means[i]
emat_11 = self.get_eyy(endog_expval, i)
emat_10 = endog_expval[:, None] - emat_11
emat_01 = -emat_11 + endog_expval
emat_00 = 1. - (emat_11 + emat_10 + emat_01)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()
cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))
self.dep_params *= self.crude_or / cor_expval
if not np.isfinite(self.dep_params):
self.dep_params = 1.
warnings.warn("dep_params became inf, resetting to 1",
ConvergenceWarning)
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
return "Global odds ratio: %.3f\n" % self.dep_params
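# Illustrative sketch (not part of statsmodels): the inverse-variance pooling
# used by pooled_odds_ratio, written out for two hypothetical 2x2 tables.
def _pooled_odds_ratio_example():
    tables = [np.array([[20., 10.], [5., 15.]]),
              np.array([[30., 12.], [9., 25.]])]
    log_or = [np.log(t[1, 1] * t[0, 0] / (t[0, 1] * t[1, 0])) for t in tables]
    var = [(1. / t).sum() for t in tables]  # Woolf variance of each log OR
    wts = np.array([1. / v for v in var])
    wts /= wts.sum()
    pooled = np.exp(np.dot(wts, log_or))
    # Agrees with the class method, which performs the same computation.
    return np.isclose(pooled,
                      GlobalOddsRatio("ordinal").pooled_odds_ratio(tables))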
class OrdinalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for ordinal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
    derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(ev, ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class NominalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for nominal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
    derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.diag(ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class Equivalence(CovStruct):
"""
A covariance structure defined in terms of equivalence classes.
An 'equivalence class' is a set of pairs of observations such that
the covariance of every pair within the equivalence class has a
common value.
Parameters
----------
pairs : dict-like
A dictionary of dictionaries, where `pairs[group][label]`
provides the indices of all pairs of observations in the group
that have the same covariance value. Specifically,
`pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
are integer arrays of the same length. `j1[i], j2[i]` is one
index pair that belongs to the `label` equivalence class. Only
one triangle of each covariance matrix should be included.
Positions where j1 and j2 have the same value are variance
parameters.
labels : array-like
An array of labels such that every distinct pair of labels
defines an equivalence class. Either `labels` or `pairs` must
be provided. When the two labels in a pair are equal two
equivalence classes are defined: one for the diagonal elements
(corresponding to variances) and one for the off-diagonal
elements (corresponding to covariances).
return_cov : boolean
If True, `covariance_matrix` returns an estimate of the
covariance matrix, otherwise returns an estimate of the
correlation matrix.
Notes
-----
Using `labels` to define the class is much easier than using
`pairs`, but is less general.
Any pair of values not contained in `pairs` will be assigned zero
covariance.
The index values in `pairs` are row indices into the `exog`
matrix. They are not updated if missing data are present. When
using this covariance structure, missing data should be removed
before constructing the model.
If using `labels`, after a model is defined using the covariance
structure it is possible to remove a label pair from the second
level of the `pairs` dictionary to force the corresponding
covariance to be zero.
Examples
--------
The following sets up the `pairs` dictionary for a model with two
groups, equal variance for all observations, and constant
covariance for all pairs of observations within each group.
>> pairs = {0: {}, 1: {}}
>> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
>> pairs[0][1] = np.tril_indices(3, -1)
>> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
>> pairs[1][2] = 3 + np.tril_indices(3, -1)
"""
def __init__(self, pairs=None, labels=None, return_cov=False):
super(Equivalence, self).__init__()
if (pairs is None) and (labels is None):
raise ValueError("Equivalence cov_struct requires either `pairs` or `labels`")
if (pairs is not None) and (labels is not None):
raise ValueError("Equivalence cov_struct accepts only one of `pairs` and `labels`")
if pairs is not None:
import copy
self.pairs = copy.deepcopy(pairs)
if labels is not None:
self.labels = np.asarray(labels)
self.return_cov = return_cov
def _make_pairs(self, i, j):
"""
Create arrays `i_`, `j_` containing all unique ordered pairs of elements in `i` and `j`.
        The arrays `i` and `j` must be one-dimensional and contain non-negative integers.
"""
mat = np.zeros((len(i)*len(j), 2), dtype=np.int32)
# Create the pairs and order them
f = np.ones(len(j))
mat[:, 0] = np.kron(f, i).astype(np.int32)
f = np.ones(len(i))
mat[:, 1] = np.kron(j, f).astype(np.int32)
mat.sort(1)
# Remove repeated rows
try:
dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
bmat = np.ascontiguousarray(mat).view(dtype)
_, idx = np.unique(bmat, return_index=True)
except TypeError:
# workaround for old numpy that can't call unique with complex
# dtypes
np.random.seed(4234)
bmat = np.dot(mat, np.random.uniform(size=mat.shape[1]))
_, idx = np.unique(bmat, return_index=True)
mat = mat[idx, :]
return mat[:, 0], mat[:, 1]
def _pairs_from_labels(self):
from collections import defaultdict
pairs = defaultdict(lambda : defaultdict(lambda : None))
model = self.model
df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
gb = df.groupby(["groups", "labels"])
ulabels = np.unique(self.labels)
for g_ix, g_lb in enumerate(model.group_labels):
# Loop over label pairs
for lx1 in range(len(ulabels)):
for lx2 in range(lx1+1):
lb1 = ulabels[lx1]
lb2 = ulabels[lx2]
try:
i1 = gb.groups[(g_lb, lb1)]
i2 = gb.groups[(g_lb, lb2)]
except KeyError:
continue
i1, i2 = self._make_pairs(i1, i2)
clabel = str(lb1) + "/" + str(lb2)
# Variance parameters belong in their own equiv class.
jj = np.flatnonzero(i1 == i2)
if len(jj) > 0:
clabelv = clabel + "/v"
pairs[g_lb][clabelv] = (i1[jj], i2[jj])
# Covariance parameters
jj = np.flatnonzero(i1 != i2)
if len(jj) > 0:
i1 = i1[jj]
i2 = i2[jj]
pairs[g_lb][clabel] = (i1, i2)
self.pairs = pairs
def initialize(self, model):
super(Equivalence, self).initialize(model)
if self.model.weights is not None:
            warnings.warn("weights not implemented for equivalence cov_struct, using unweighted covariance estimate")
if not hasattr(self, 'pairs'):
self._pairs_from_labels()
# Initialize so that any equivalence class containing a
# variance parameter has value 1.
self.dep_params = defaultdict(lambda : 0.)
self._var_classes = set([])
for gp in self.model.group_labels:
for lb in self.pairs[gp]:
j1, j2 = self.pairs[gp][lb]
if np.any(j1 == j2):
if not np.all(j1 == j2):
warnings.warn("equivalence class contains both variance and covariance parameters")
self._var_classes.add(lb)
self.dep_params[lb] = 1
# Need to start indexing at 0 within each group.
        # rx maps old indices to new indices
rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
for g_ix, g_lb in enumerate(self.model.group_labels):
ii = self.model.group_indices[g_lb]
rx[ii] = np.arange(len(ii), dtype=np.int32)
# Reindex
for gp in self.model.group_labels:
for lb in self.pairs[gp].keys():
a, b = self.pairs[gp][lb]
self.pairs[gp][lb] = (rx[a], rx[b])
def update(self, params):
endog = self.model.endog_li
varfunc = self.model.family.variance
cached_means = self.model.cached_means
dep_params = defaultdict(lambda : [0., 0., 0.])
n_pairs = defaultdict(lambda : 0)
dim = len(params)
for k, gp in enumerate(self.model.group_labels):
expval, _ = cached_means[k]
stdev = np.sqrt(varfunc(expval))
resid = (endog[k] - expval) / stdev
for lb in self.pairs[gp].keys():
if (not self.return_cov) and lb in self._var_classes:
continue
jj = self.pairs[gp][lb]
dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
if not self.return_cov:
dep_params[lb][1] += np.sum(resid[jj[0]]**2)
dep_params[lb][2] += np.sum(resid[jj[1]]**2)
n_pairs[lb] += len(jj[0])
if self.return_cov:
for lb in dep_params.keys():
dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
else:
for lb in dep_params.keys():
den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
dep_params[lb] = dep_params[lb][0] / den
for lb in self._var_classes:
dep_params[lb] = 1.
self.dep_params = dep_params
self.n_pairs = n_pairs
def covariance_matrix(self, expval, index):
dim = len(expval)
cmat = np.zeros((dim, dim))
g_lb = self.model.group_labels[index]
for lb in self.pairs[g_lb].keys():
j1, j2 = self.pairs[g_lb][lb]
cmat[j1, j2] = self.dep_params[lb]
cmat = cmat + cmat.T
np.fill_diagonal(cmat, cmat.diagonal() / 2)
return cmat, not self.return_cov
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
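# Illustrative sketch (not part of statsmodels): using `labels` to define the
# equivalence classes.  Observations within a group that share a label pair
# get a common correlation parameter; the data and labels below are synthetic
# and the import path assumes the public statsmodels API.
def _equivalence_example(n_groups=20, seed=0):
    import statsmodels.api as sm
    rng = np.random.RandomState(seed)
    group_size = 4
    n = n_groups * group_size
    groups = np.repeat(np.arange(n_groups), group_size)
    # Two observation types per group define within-type and between-type
    # covariance classes plus per-type variance classes.
    labels = np.tile([0, 0, 1, 1], n_groups)
    exog = sm.add_constant(rng.normal(size=(n, 2)))
    endog = rng.normal(size=n)
    cov = Equivalence(labels=labels)
    model = sm.GEE(endog, exog, groups=groups, cov_struct=cov)
    return model.fit()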
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_gtk3.py | 8 | 39097 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if Gtk.main_level() == 0:
Gtk.main()
show = Show()
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
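# Illustrative sketch (not part of this backend): a TimerGTK3 is normally
# obtained through the canvas' new_timer method (defined on FigureCanvasGTK3
# below) rather than constructed directly.  The figure argument and the
# callback are hypothetical.
def _timer_example(figure):
    def _update_title():
        figure.suptitle("still alive")
        figure.canvas.draw_idle()
    timer = figure.canvas.new_timer(interval=1000)  # interval in milliseconds
    timer.add_callback(_update_title)
    timer.start()
    return timer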
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
if _debug:
print("FigureCanvasGTK3.%s" % fn_name())
print("size_allocate (%d x %d)" % (allocation.width, allocation.height))
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
self._need_redraw = True
if self.get_visible() and self.get_mapped():
self.queue_draw()
        # do a synchronous draw (it's less efficient than an async draw,
# but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK3(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The Gtk.Toolbar (gtk only)
vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
window : The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
def add_widget(child, expand, fill, padding):
child.show()
self.vbox.pack_end(child, False, False, 0)
size_request = child.size_request()
return size_request.height
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarGTK3(self.toolmanager)
h += add_widget(self.statusbar, False, False, 0)
h += add_widget(Gtk.HSeparator(), False, False, 0)
if self.toolbar is not None:
self.toolbar.show()
h += add_widget(self.toolbar, False, False, 0)
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
Gtk.main_level() >= 1:
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3 (self.canvas, self.window)
elif rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK3(self.toolmanager)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
        # must be initialized after the toolbar has been set
if rcParams['toolbar'] != 'toolbar2':
toolmanager = ToolManager(self.canvas)
else:
toolmanager = None
return toolmanager
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
#self.canvas.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
return filename, self.ext
class RubberbandGTK3(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self.ctx = None
def draw_rubberband(self, x0, y0, x1, y1):
# 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
# Recipe/189744'
self.ctx = self.figure.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.figure.canvas.draw()
height = self.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea = Gtk.Box()
self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
self.pack_start(self._toolarea, False, False, 0)
self._toolarea.show_all()
self._groups = {}
self._toolitems = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
tbutton = Gtk.ToggleToolButton()
else:
tbutton = Gtk.ToolButton()
tbutton.set_label(name)
if image_file is not None:
image = Gtk.Image()
image.set_from_file(image_file)
tbutton.set_icon_widget(image)
if position is None:
position = -1
self._add_button(tbutton, group, position)
signal = tbutton.connect('clicked', self._call_tool, name)
tbutton.set_tooltip_text(description)
tbutton.show_all()
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tbutton, signal))
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
toolbar = Gtk.Toolbar()
toolbar.set_style(Gtk.ToolbarStyle.ICONS)
self._toolarea.pack_start(toolbar, False, False, 0)
toolbar.show_all()
self._groups[group] = toolbar
self._groups[group].insert(button, position)
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event('%s Not in toolbar' % name, self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea.pack_start(sep, False, True, 0)
sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
def __init__(self, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
Gtk.Statusbar.__init__(self)
self._context = self.get_context_id('message')
def set_message(self, s):
self.pop(self._context)
self.push(self._context, s)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.figure.canvas.manager.window,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.figure.canvas.get_supported_filetypes(),
default_filetype=self.figure.canvas.get_default_filetype())
fc.set_current_name(self.figure.canvas.get_default_filename())
return fc
def trigger(self, *args, **kwargs):
chooser = self.get_filechooser()
fname, format_ = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(
rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
self.figure.canvas.print_figure(fname, format=format_)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class SetCursorGTK3(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def init_window(self):
if self.window:
return
self.window = Gtk.Window(title="Subplot Configuration Tool")
try:
self.window.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.window.connect('destroy', self.destroy)
toolfig = Figure(figsize=(6, 3))
canvas = self.figure.canvas.__class__(toolfig)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
self.window.set_default_size(w, h)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
self.window.show()
def destroy(self, *args):
self.window.destroy()
self.window = None
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def trigger(self, sender, event, data=None):
self.init_window()
self.window.present()
class DialogLineprops(object):
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import Gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = Gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the marker face color button is set'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# Define the file to use as the GTk icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3
Toolbar = ToolbarGTK3
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/sklearn/sklearn_decision_tree.py | 2 | 1567 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from sklearn import datasets
from sklearn import model_selection
from sklearn import tree
#---------------------------------------------------------------------
def decision_tree_classifier_example():
iris = datasets.load_iris()
X, Y = iris.data, iris.target
classifier = tree.DecisionTreeClassifier(random_state=0)
classifier.fit(X, Y)
print('Feature importance =', classifier.feature_importances_)
X_test = [[0, 0, 0, 0]]
#X_test = X
print('Prediction =', classifier.predict(X_test))
print('Prediction (probability) =', classifier.predict_proba(X_test))
print('Prediction (log probability) =', classifier.predict_log_proba(X_test))
print('Score =', classifier.score(X, Y))
print('Decision path =', classifier.decision_path(X))
print('Index of the leaf =', classifier.apply(X))
#---------------------------------------------------------------------
def decision_tree_regressor_example():
boston = datasets.load_boston()
X, Y = boston.data, boston.target
regressor = tree.DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)
print('Feature importance =', regressor.feature_importances_)
X_test = X
print('Prediction =', regressor.predict(X_test))
print('Score =', regressor.score(X, Y))
print('Decision path =', regressor.decision_path(X))
print('Index of the leaf =', regressor.apply(X))
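#---------------------------------------------------------------------
# Illustrative sketch (not part of the original example): print the learned
# decision rules as text. Assumes scikit-learn >= 0.21, which provides
# tree.export_text().
def decision_tree_text_export_example():
    iris = datasets.load_iris()
    classifier = tree.DecisionTreeClassifier(max_depth=2, random_state=0)
    classifier.fit(iris.data, iris.target)
    # Dump the fitted tree as human-readable if/else rules.
    print(tree.export_text(classifier, feature_names=list(iris.feature_names)))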
def main():
decision_tree_classifier_example()
decision_tree_regressor_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
DebbyChiang/prospecting-reformat | For-LeadFuze/department.py | 1 | 16323 | #Code can be shortened by using more string operations
#import libraries
import pandas as pd
import numpy as np
#import csv file
prospects = pd.DataFrame(pd.read_csv("/Users/open/Desktop/reformatted.csv"))
#IT keywords --> fill corresponding Department column/row w/"IT"
#Keyword: CTO
prospects['IT'] = prospects['Title'].str.contains("CTO ")
prospects['IT'] = prospects['Title'].str.contains(" CTO")
prospects['IT'] = prospects ['IT'] | prospects['Title'].str.contains(" cto")
prospects['IT'] = prospects ['IT'] | prospects['Title'].str.contains("cto ")
#Keyword: Technology
prospects['IT'] = prospects ['IT'] | prospects['Title'].str.contains("Technology")
prospects['IT'] = prospects ['IT'] | prospects['Title'].str.contains("TECHNOLOGY")
prospects['IT'] = prospects ['IT'] | prospects['Title'].str.contains("technology")
#Keyword: IT
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("IT")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" IT")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("IT ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" IT ")
#Keyword: Solution
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Solution")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("SOLUTION")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("solution")
#reference the positions where you want to change the value
prospects.loc[prospects.IT >= 1, 'Department'] = "IT"
#Keyword: Facilities/Facility
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Facilities")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Facilities ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("facilities")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("FACILITIES")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Facility")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("facility")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("FACILITY")
#Keyword: Sustainability
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Sustainability")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("sustainability")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("SUSTAINABILITY")
#Keyword: Infrastructure
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastructure")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Infrastructure")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Infrastructure ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastructure ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("infrastructure")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("INFRASTRUCTURE")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastucture")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("infrastucture")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastucture")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastucture".lower())
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Infrastucture".upper())
#Keyword: Systems/System
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Systems")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Systems")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Systems ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("systems")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("SYSTEMS")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("System")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("System ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" System")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" System ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("system")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("SYSTEM")
#intermediate Department fill-in (redundant once the IT flags accumulate via OR, but harmless)
prospects.loc[prospects.IT >= 1, 'Department'] = "IT"
#Keyword: Sys
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Sys")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Sys ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Sys ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Sys")
#Keyword: Data
prospects['IT'] = prospects['Title'].str.contains("Data")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("data")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("DATABASE")
#Keyword: Information
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Information")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("information")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("INFORMATION")
#Keyword: Technical
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Technical")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("technical")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("TECHNICAL")
#Keyword: Desk
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Desk")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("HDESK")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Desk")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" Desk")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("desk ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" desk ")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains(" DESK ")
#Keyword: Networks/network
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Networks")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("networks")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("NETWORKS")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("NetWorks")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Network")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("network")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("NETWORK")
#Keyword: Technologist
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Technologist")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("technologist")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("TECHNOLOGIST")
#Desktop
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Desktop")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("desktop")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("DESKTOP")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("DeskTop")
#Tech
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("Tech")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("tech")
prospects['IT'] = prospects['IT'] | prospects['Title'].str.contains("TECH")
#IT Department fill-in, df.loc[<row selection>, <column selection>]
prospects.loc[prospects.IT >= 1, 'Department'] = "IT"
#Human Resources keywords --> fill corresponding Department column/row w/"HR"
#HR Keywords Check
#Keyword: Operations Manager
prospects['HR'] = prospects['Title'].str.contains("Operations Manager") #initialize the HR flag column
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("operations manager")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OPERATIONS MANAGER")
#Keyword: Procurement
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Procurement")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("procurement")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("PROCUREMENT")
#Keyword: HR/hris
prospects['HR'] = prospects['Title'].str.contains("HR")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("hr ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" hr")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" hr ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("hris")
#Keyword: Hiring
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Hiring")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("hiring")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("hiring ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Hiring ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" hiring ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" Hiring")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("HIRING")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("HIRING ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" HIRING")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" HIRING ")
#Keyword: Career
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Career ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("career")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("CAREER")
#Keyword: Human
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Human ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("human")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("HUMAN")
#Keyword: Office
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Office ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("office ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OFFICE ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Offices ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("offices ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OFFICES ")
#Keyword: Corporate Social Responsibility - NEED TO TEST
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Responsibility")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("responsibility")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("RESPONSIBILITY")
#Keyword: CSR - NEED TO TEST
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("CSR")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("csr")
#Keyword: Employee Engagement
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("employee")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Employee")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("EMPLOYEE")
#Keyword: People
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("People")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("people")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("PEOPLE")
#Keyword: Talent
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("talent")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("TALENT")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Talent")
#Keyword: Employee
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Employee")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("employee")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("EMPLOYEE")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Employees")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("employees")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("EMPLOYEES")
#Keyword: Training
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Training")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("training")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("TRAINING")
#Keyword: Receptionist
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Receptionist")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("receptionist")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("RECEPTIONIST")
#Keyword: Leadership Development
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Leadership Development")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("leadership development")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("LEADERSHIP DEVELOPMENT")
#Keyword: Office Services
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Office Services")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("office services")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OFFICE SERVICES")
#Keyword: Staff
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Staff")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("staff")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("STAFF")
#Keyword: Compensation & Benefits
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Compensation")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("compensation")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("COMPENSATION")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Benefits")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("benefits")
#Keyword: Recruit
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("recruit")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Recruit")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("RECRUIT")
#Keyword: Concierge
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Concierge")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("concierge")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("CONCIERGE")
#Keyword: Offices/Office
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Offices")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("offices")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OFFICE")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("office ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Office ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("OFFICE ")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" Office")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" office")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains(" OFFICE")
#Engagement
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Engagement")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("engagement")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("ENGAGEMENT")
#Assistant
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("Assistant")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("assistant")
prospects['HR'] = prospects['HR'] | prospects['Title'].str.contains("ASSISTANT")
#HR Department fill-in, df.loc[<row selection>, <column selection>]
#boolean True counts as 1, so ">= 1" selects every row where the HR flag is set
prospects.loc[prospects.HR >= 1, 'Department'] = "HR"
#fill remaining Department column/row with "Executives"
prospects['Department'] = prospects.Department.fillna("Executives")
#Drop unneeded columns
prospects = prospects.drop(['Unnamed: 0', 'IT', 'HR' ], axis = 1)
#Export file
prospects.to_csv('/Users/open/Desktop/department.csv')
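#Illustrative sketch (not called above): the keyword checks can be shortened, as
#the note at the top suggests, by using one case-insensitive regex per department.
#The keyword lists here are a representative sample, not the full sets used above.
def classify_department(titles):
    import re
    it_keywords = ["cto", "technology", "solution", "facilit", "system", "data",
                   "information", "technical", "desk", "network", "tech"]
    hr_keywords = ["operations manager", "procurement", "hr", "hiring", "career",
                   "human", "office", "employee", "people", "talent", "training",
                   "staff", "recruit", "assistant"]
    it_pattern = "|".join(re.escape(k) for k in it_keywords)
    hr_pattern = "|".join(re.escape(k) for k in hr_keywords)
    department = pd.Series("Executives", index=titles.index)
    #Apply IT first, then HR, so HR wins ties (same precedence as the script above)
    department[titles.str.contains(it_pattern, case=False, na=False)] = "IT"
    department[titles.str.contains(hr_pattern, case=False, na=False)] = "HR"
    return department
#Example: prospects['Department'] = classify_department(prospects['Title'])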
| gpl-2.0 |
datacommonsorg/api-python | datacommons_pandas/df_builder.py | 1 | 13177 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons Pandas API DataFrame Builder Module.
Provides functions for building pandas DataFrames using the Data Commons Graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import pandas as pd
import six
import datacommons_pandas.stat_vars as dc
def build_time_series(place,
stat_var,
measurement_method=None,
observation_period=None,
unit=None,
scaling_factor=None):
"""Constructs a pandas Series with `dates` as the index and corresponding `stat_var` statistics as values.
Args:
place (`str`): The dcid of Place to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
measurement_method (`str`): Optional, the dcid of the preferred
`measurementMethod` value.
observation_period (`str`): Optional, the preferred
`observationPeriod` value.
unit (`str`): Optional, the dcid of the preferred `unit` value.
scaling_factor (`int`): Optional, the preferred `scalingFactor` value.
Returns:
      A pandas Series with dates as the index and observed statistics as
      values, representing a sorted time series satisfying all optional args.
"""
    result_dict = dc.get_stat_series(place, stat_var, measurement_method,
                                     observation_period, unit, scaling_factor)
    # Explicit dtype to avoid warning thrown by pd.Series({})
    if not result_dict:
        return pd.Series(result_dict, dtype=object)
else:
return pd.Series(result_dict).sort_index()
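# Illustrative usage sketch (not part of the library): "geoId/06" and
# "Count_Person" are example dcids; calling this helper requires network
# access to the Data Commons API.
def _example_build_time_series():
    series = build_time_series("geoId/06", "Count_Person")
    print(series.tail())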
def _group_stat_all_by_obs_options(places, stat_vars, keep_series=True):
"""Groups the result of `get_stat_all` by StatVarObservation options for time series or multivariates.
Note that this function does not preserve `(place, stat_var)` pairs that
yield no data `from get_stat_all`. In the extreme case that there is no
data for any pairs, raise a ValueError instead of returning an empty dict.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
keep_series (`boolean`): if True, output time series grouped by
StatVarObservation options; if False, output latest statistics grouped
by StatVarObservation options.
Returns:
A nested dict mapping each StatisticalVariable in `stat_vars` to its
StatVarObservation options. In turn, each StatVarObservation option
maps to a list of rows, one per place, with the place id and stat data.
Raises:
ValueError: If the payload returned by the Data Commons REST API is
malformed, or if there is no data for any (Place, StatisticalVariables)
pair.
"""
if keep_series:
if len(stat_vars) != 1:
raise ValueError(
'When `keep_series` is set, only one StatisticalVariable for `stat_vars` is allowed.'
)
res = collections.defaultdict(list)
else:
res = collections.defaultdict(lambda: collections.defaultdict(list))
stat_all = dc.get_stat_all(places, stat_vars)
for place, place_data in stat_all.items():
if not place_data:
continue
for stat_var, stat_var_data in place_data.items():
if not stat_var_data:
continue
for source_series in stat_var_data['sourceSeries']:
series = source_series['val']
# Convert dict of SVO options into nested tuple (hashable key).
obs_options = (('measurementMethod',
source_series.get('measurementMethod')),
('observationPeriod',
source_series.get('observationPeriod')),
('unit', source_series.get('unit')),
('scalingFactor',
source_series.get('scalingFactor')))
if keep_series:
res[obs_options].append(dict({'place': place}, **series))
else:
date = max(series)
res[stat_var][obs_options].append({
'place': place,
'date': date,
'val': series[date]
})
if not res:
raise ValueError(
'No data for any of specified Places and StatisticalVariables.')
if keep_series:
return dict(res)
else:
return {k: dict(v) for k, v in res.items()}
def _time_series_pd_input(places, stat_var):
"""Returns a `list` of `dict` per element of `places` based on the `stat_var`.
Data Commons will pick a set of StatVarObservation options that covers the
maximum number of queried places. Among ties, Data Commons selects an option
set with the latest Observation.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
Returns:
A `list` of `dict`, one per element of `places`. Each `dict` consists of
the time series and place identifier.
Examples:
>>> _time_series_pd_input(["geoId/29", "geoId/33"], "Count_Person")
[
{'2020-03-07': 20, '2020-03-08': 40, 'place': 'geoId/29'},
{'2020-08-21': 428, '2020-08-22': 429, 'place': 'geoId/33'}
]
"""
rows_dict = _group_stat_all_by_obs_options(places, [stat_var],
keep_series=True)
most_geos = []
max_geo_count_so_far = 0
latest_date = []
latest_date_so_far = ''
for options, rows in rows_dict.items():
current_geos = len(rows)
if current_geos > max_geo_count_so_far:
max_geo_count_so_far = current_geos
most_geos = [options]
# Reset tiebreaker stats. Recompute after this if-else block.
latest_date = []
latest_date_so_far = ''
elif current_geos == max_geo_count_so_far:
most_geos.append(options)
else:
# Do not compute tiebreaker stats if no change to most_geos.
# Skip to top of the for loop.
continue
for row in rows:
dates = set(row.keys())
dates.remove('place')
row_max_date = max(dates)
if row_max_date > latest_date_so_far:
latest_date_so_far = row_max_date
latest_date = [options]
elif row_max_date == latest_date_so_far:
latest_date.append(options)
for options in most_geos:
if options in latest_date:
return rows_dict[options]
def build_time_series_dataframe(places, stat_var, desc_col=False):
"""Constructs a pandas DataFrame with `places` as the index and dates of the time series as the columns.
To ensure statistics are comparable across all Places, when multiple
StatVarObservations options are available for Place and StatVar combos,
Data Commons selects the StatVarObservation options that covers the most
Places, and breaks ties using the StatVarObservation options that yield
the latest Observation for any Place.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
desc_col: Whether to order columns in descending order.
Returns:
A pandas DataFrame with Place IDs as the index, and sorted dates as columns.
"""
try:
if isinstance(places, six.string_types):
places = [places]
else:
places = list(places)
assert all(isinstance(place, six.string_types) for place in places)
except:
raise ValueError(
'Parameter `places` must be a string object or list-like object of string.'
)
if not isinstance(stat_var, six.string_types):
raise ValueError('Parameter `stat_var` must be a string.')
df = pd.DataFrame.from_records(_time_series_pd_input(places, stat_var))
df.set_index('place', inplace=True)
df.sort_index(inplace=True)
return df[sorted(df.columns, reverse=desc_col)]
def _multivariate_pd_input(places, stat_vars):
"""Returns a `list` of `dict` per element of `places` based on the `stat_var`.
Data Commons will pick a set of StatVarObservation options that covers the
maximum number of queried places. Among ties, Data Commons selects an option
set with the latest Observation.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
Returns:
      A `list` of `dict`, one per element of `places`. Each `dict` consists of
      the latest statistic for each StatisticalVariable and the place identifier.
Examples:
>>> _multivariate_pd_input(["geoId/29", "geoId/33"],
["Count_Person", "Median_Income_Person"])
[
{'Count_Person': 20, 'Median_Income_Person': 40, 'place': 'geoId/29'},
{'Count_Person': 428, 'Median_Income_Person': 429, 'place': 'geoId/33'}
]
"""
rows_dict = _group_stat_all_by_obs_options(places,
stat_vars,
keep_series=False)
place2cov = collections.defaultdict(dict) # {geo: {var1: 3, var2: 33}}
for stat_var, candidates_dict in rows_dict.items():
selected_rows = None
most_geos = []
max_geo_count_so_far = 0
latest_date = []
latest_date_so_far = ''
for options, rows in candidates_dict.items():
current_geos = len(rows)
if current_geos > max_geo_count_so_far:
max_geo_count_so_far = current_geos
most_geos = [options]
# Reset tiebreaker stats. Recompute after this if-else block.
latest_date = []
latest_date_so_far = ''
elif current_geos == max_geo_count_so_far:
most_geos.append(options)
else:
# Do not compute tiebreaker stats if not in most_geos.
continue
for row in rows:
row_date = row['date']
if row_date > latest_date_so_far:
latest_date_so_far = row_date
latest_date = [options]
elif row_date == latest_date_so_far:
latest_date.append(options)
for options in most_geos:
if options in latest_date:
selected_rows = candidates_dict[options]
for row in selected_rows:
place2cov[row['place']][stat_var] = row['val']
return [
dict({'place': place}, **multivariates)
for place, multivariates in place2cov.items()
]
def build_multivariate_dataframe(places, stat_vars):
"""Constructs a pandas DataFrame with `places` as the index and `stat_vars` as the columns.
To ensure statistics are comparable across all Places, when multiple
StatVarObservations options are available for Place and StatVar combos,
Data Commons selects the StatVarObservation options that covers the most
Places, and breaks ties using the StatVarObservation options that yield
the latest Observation for any Place.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
Returns:
A pandas DataFrame with Place IDs as the index and `stat_vars` as columns.
"""
try:
if isinstance(places, six.string_types):
places = [places]
else:
places = list(places)
assert all(isinstance(place, six.string_types) for place in places)
if isinstance(stat_vars, six.string_types):
stat_vars = [stat_vars]
else:
stat_vars = list(stat_vars)
assert all(
isinstance(stat_var, six.string_types)
for stat_var in stat_vars)
except:
raise ValueError(
'Parameter `places` and `stat_vars` must be string object or list-like object.'
)
df = pd.DataFrame.from_records(_multivariate_pd_input(places, stat_vars))
df.set_index('place', inplace=True)
df.sort_index(inplace=True)
return df
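# Illustrative usage sketch (not part of the library): the dcids below are
# examples (California and New York states); both builders query the Data
# Commons API when called.
def _example_dataframe_builders():
    places = ["geoId/06", "geoId/36"]
    # One row per place, one column per date, for a single StatisticalVariable.
    ts_df = build_time_series_dataframe(places, "Count_Person")
    # One row per place, one column per StatisticalVariable (latest values).
    mv_df = build_multivariate_dataframe(
        places, ["Count_Person", "Median_Income_Person"])
    print(ts_df.iloc[:, -3:])
    print(mv_df)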
| apache-2.0 |
tawsifkhan/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
Statoil/libres | python/res/enkf/export/gen_kw_collector.py | 2 | 2529 | import math
from pandas import DataFrame, MultiIndex
import numpy
from res.enkf import ErtImplType, EnKFMain, EnkfFs, RealizationStateEnum, GenKwConfig
from res.enkf.key_manager import KeyManager
from res.enkf.plot_data import EnsemblePlotGenKW
from ecl.util.util import BoolVector
class GenKwCollector(object):
@staticmethod
def createActiveList(ert, fs):
state_map = fs.getStateMap()
ens_mask = BoolVector(False, ert.getEnsembleSize())
state_map.selectMatching(ens_mask, RealizationStateEnum.STATE_INITIALIZED | RealizationStateEnum.STATE_HAS_DATA)
active_list = BoolVector.createActiveList(ens_mask)
return [iens for iens in active_list]
@staticmethod
def getAllGenKwKeys(ert):
""" @rtype: list of str """
key_manager = KeyManager(ert)
return key_manager.genKwKeys()
@staticmethod
def loadAllGenKwData(ert, case_name, keys=None):
"""
@type ert: EnKFMain
@type case_name: str
@type keys: list of str
@rtype: DataFrame
"""
fs = ert.getEnkfFsManager().getFileSystem(case_name)
realizations = GenKwCollector.createActiveList(ert, fs)
gen_kw_keys = GenKwCollector.getAllGenKwKeys(ert)
if keys is not None:
gen_kw_keys = [key for key in keys if key in gen_kw_keys] # ignore keys that doesn't exist
gen_kw_array = numpy.empty(shape=(len(gen_kw_keys), len(realizations)), dtype=numpy.float64)
gen_kw_array.fill(numpy.nan)
for column_index, key in enumerate(gen_kw_keys):
key, keyword = key.split(":")
use_log_scale = False
if key.startswith("LOG10_"):
key = key[6:]
use_log_scale = True
ensemble_config_node = ert.ensembleConfig().getNode(key)
ensemble_data = EnsemblePlotGenKW(ensemble_config_node, fs)
keyword_index = ensemble_data.getIndexForKeyword(keyword)
for realization_index, realization_number in enumerate(realizations):
realization_vector = ensemble_data[realization_number]
value = realization_vector[keyword_index]
if use_log_scale:
value = math.log10(value)
gen_kw_array[column_index][realization_index] = value
gen_kw_data = DataFrame(data=numpy.transpose(gen_kw_array), index=realizations, columns=gen_kw_keys)
gen_kw_data.index.name = "Realization"
return gen_kw_data
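# Illustrative usage sketch (not part of the module): `ert` must be an already
# initialised EnKFMain instance, and "default" is a placeholder case name.
def _example_load_gen_kw(ert):
    keys = GenKwCollector.getAllGenKwKeys(ert)
    data = GenKwCollector.loadAllGenKwData(ert, "default", keys=keys[:2])
    print(data.describe())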
| gpl-3.0 |
dssg/wikienergy | disaggregator/build/pandas/pandas/stats/tests/test_moments.py | 3 | 89255 | import nose
import sys
import functools
import warnings
from datetime import datetime
from numpy.random import randn
from numpy.testing.decorators import slow
import numpy as np
from distutils.version import LooseVersion
from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull, concat
from pandas.util.testing import (
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal
)
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
from pandas.compat import range, zip, PY3, StringIO
N, K = 100, 10
class Base(tm.TestCase):
_multiprocess_can_split_ = True
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestMoments(Base):
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
def test_centered_axis_validation(self):
# ok
        mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean, Series(np.ones(10)),
                          3, center=True, axis=1)
        # ok ok
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=0)
        mom.rolling_mean(DataFrame(np.ones((10, 10))), 3, center=True, axis=1)
        # bad axis
        self.assertRaises(ValueError, mom.rolling_mean,
                          DataFrame(np.ones((10, 10))), 3, center=True, axis=2)
def test_rolling_sum(self):
self._check_moment_func(mom.rolling_sum, np.sum)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(mom.rolling_count, counter,
has_min_periods=False,
preserve_nan=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(mom.rolling_mean, np.mean)
def test_cmov_mean(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49,
16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_mean(vals, 5, center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_mean(Series(vals), 5, center=True)
assert_series_equal(xp, rs)
def test_cmov_window(self):
# GH 8238
tm._skip_if_no_scipy()
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xp = np.array([np.nan, np.nan, 9.962, 11.27 , 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
assert_almost_equal(xp, rs)
xp = Series(rs)
rs = mom.rolling_window(Series(vals), 5, 'boxcar', center=True)
assert_series_equal(xp, rs)
def test_cmov_window_corner(self):
# GH 8238
tm._skip_if_no_scipy()
# all nan
vals = np.empty(10, dtype=float)
vals.fill(np.nan)
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertTrue(np.isnan(rs).all())
# empty
vals = np.array([])
rs = mom.rolling_window(vals, 5, 'boxcar', center=True)
self.assertEqual(len(rs), 0)
# shorter than window
vals = np.random.randn(5)
rs = mom.rolling_window(vals, 10, 'boxcar')
self.assertTrue(np.isnan(rs).all())
self.assertEqual(len(rs), 5)
def test_cmov_window_frame(self):
# Gh 8238
tm._skip_if_no_scipy()
vals = np.array([[ 12.18, 3.64],
[ 10.18, 9.16],
[ 13.24, 14.61],
[ 4.51, 8.11],
[ 6.15, 11.44],
[ 9.14, 6.21],
[ 11.31, 10.67],
[ 2.94, 6.51],
[ 9.42, 8.39],
[ 12.44, 7.34 ]])
xp = np.array([[ np.nan, np.nan],
[ np.nan, np.nan],
[ 9.252, 9.392],
[ 8.644, 9.906],
[ 8.87 , 10.208],
[ 6.81 , 8.588],
[ 7.792, 8.644],
[ 9.05 , 7.824],
[ np.nan, np.nan],
[ np.nan, np.nan]])
# DataFrame
rs = mom.rolling_window(DataFrame(vals), 5, 'boxcar', center=True)
assert_frame_equal(DataFrame(xp), rs)
def test_cmov_window_na_min_periods(self):
tm._skip_if_no_scipy()
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = mom.rolling_mean(vals, 5, min_periods=4, center=True)
rs = mom.rolling_window(vals, 5, 'boxcar', min_periods=4, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009,
14.03687, 13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556,
13.33889, 13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559,
14.17267, 14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671,
14.03559, 15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607,
14.20036, 14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575,
14.3675, 14.0825, 11.5675, np.nan, np.nan]}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt in win_types:
rs = mom.rolling_window(Series(vals), 5, wt, center=True)
assert_series_equal(xp, rs)
def test_cmov_window_regular_missing_data(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, np.nan, 10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345,
9.17869, 12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425,
9.1925, 12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599,
9.1764, 12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384,
9.56348, 12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618,
9.16786, 13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667,
10.34667, 12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098,
13.65509]
}
for wt in win_types:
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, min_periods=3)
assert_series_equal(xp, rs)
def test_cmov_window_special(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81,
13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763,
13.89053, 13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589,
11.73161, 13.08516, 12.95111, 12.74577,
np.nan, np.nan],
'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284,
12.88331, 12.96079, 12.77008, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161,
12.75129, 12.90702, 12.83757, np.nan, np.nan]
}
for wt, k in zip(win_types, kwds):
xp = Series(xps[wt])
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_cmov_window_special_linear_range(self):
# GH 8238
tm._skip_if_no_scipy()
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
{'width': 0.5}]
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
for wt, k in zip(win_types, kwds):
rs = mom.rolling_window(Series(vals), 5, wt, center=True,
**k)
assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(mom.rolling_median, np.median)
def test_rolling_min(self):
self._check_moment_func(mom.rolling_min, np.min)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_min(a, window=100, min_periods=1)
assert_almost_equal(b, np.ones(len(a)))
self.assertRaises(ValueError, mom.rolling_min, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_max(self):
self._check_moment_func(mom.rolling_max, np.max)
a = np.array([1, 2, 3, 4, 5])
b = mom.rolling_max(a, window=100, min_periods=1)
assert_almost_equal(a, b)
self.assertRaises(ValueError, mom.rolling_max, np.array([1,
2, 3]), window=3, min_periods=5)
def test_rolling_quantile(self):
qs = [.1, .5, .9]
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = per / 1. * (values.shape[0] - 1)
return values[int(idx)]
for q in qs:
def f(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_quantile(x, window, q,
min_periods=min_periods,
freq=freq,
center=center)
def alt(x):
return scoreatpercentile(x, q)
self._check_moment_func(f, alt)
def test_rolling_apply(self):
# suppress warnings about empty slices, as we are deliberately testing with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
ser = Series([])
assert_series_equal(ser, mom.rolling_apply(ser, 10, lambda x: x.mean()))
def roll_mean(x, window, min_periods=None, freq=None, center=False):
return mom.rolling_apply(x, window,
lambda x: x[np.isfinite(x)].mean(),
min_periods=min_periods,
freq=freq,
center=center)
self._check_moment_func(roll_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.rolling_apply(s, 2, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 2.])
assert_series_equal(result, expected)
def test_rolling_apply_out_of_bounds(self):
# #1850
arr = np.arange(4)
# it works!
result = mom.rolling_apply(arr, 10, np.sum)
self.assertTrue(isnull(result).all())
result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)
assert_almost_equal(result, result)
def test_rolling_std(self):
self._check_moment_func(mom.rolling_std,
lambda x: np.std(x, ddof=1))
self._check_moment_func(functools.partial(mom.rolling_std, ddof=0),
lambda x: np.std(x, ddof=0))
def test_rolling_std_1obs(self):
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1)
expected = np.array([np.nan] * 5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),
1, min_periods=1, ddof=0)
expected = np.zeros(5)
assert_almost_equal(result, expected)
result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),
3, min_periods=2)
self.assertTrue(np.isnan(result[2]))
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = np.array([0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767])
b = mom.rolling_std(a, window=3)
self.assertTrue(np.isfinite(b[2:]).all())
b = mom.ewmstd(a, span=3)
self.assertTrue(np.isfinite(b[2:]).all())
def test_rolling_var(self):
self._check_moment_func(mom.rolling_var,
lambda x: np.var(x, ddof=1),
test_stable=True)
self._check_moment_func(functools.partial(mom.rolling_var, ddof=0),
lambda x: np.var(x, ddof=0))
def test_rolling_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_skew,
lambda x: skew(x, bias=False))
def test_rolling_kurt(self):
try:
from scipy.stats import kurtosis
except ImportError:
raise nose.SkipTest('no scipy')
self._check_moment_func(mom.rolling_kurt,
lambda x: kurtosis(x, bias=False))
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
arr = np.frombuffer(data, dtype='<f8')
if sys.byteorder != "little":
arr = arr.byteswap().newbyteorder()
result = mom.rolling_sum(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_mean(arr, 2)
self.assertTrue((result[1:] >= 0).all())
result = mom.rolling_var(arr, 2)
self.assertTrue((result[1:] >= 0).all())
# #2527, ugh
arr = np.array([0.00012456, 0.0003, 0])
result = mom.rolling_mean(arr, 1)
self.assertTrue(result[-1] >= 0)
result = mom.rolling_mean(-arr, 1)
self.assertTrue(result[-1] <= 0)
def _check_moment_func(self, func, static_comp, window=50,
has_min_periods=True,
has_center=True,
has_time_rule=True,
preserve_nan=True,
fill_value=None,
test_stable=False):
self._check_ndarray(func, static_comp, window=window,
has_min_periods=has_min_periods,
preserve_nan=preserve_nan,
has_center=has_center,
fill_value=fill_value,
test_stable=test_stable)
self._check_structures(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
fill_value=fill_value,
has_center=has_center)
def _check_ndarray(self, func, static_comp, window=50,
has_min_periods=True,
preserve_nan=True,
has_center=True,
fill_value=None,
test_stable=False,
test_window=True):
result = func(self.arr, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
if has_min_periods:
result = func(arr, 50, min_periods=30)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# min_periods is working correctly
result = func(arr, 20, min_periods=15)
self.assertTrue(np.isnan(result[23]))
self.assertFalse(np.isnan(result[24]))
self.assertFalse(np.isnan(result[-6]))
self.assertTrue(np.isnan(result[-5]))
arr2 = randn(20)
result = func(arr2, 10, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, 20, min_periods=0)
result1 = func(arr, 20, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr, 50)
assert_almost_equal(result[-1], static_comp(arr[10:-10]))
# GH 7925
if has_center:
if has_min_periods:
result = func(arr, 20, min_periods=15, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20, min_periods=15)[9:]
else:
result = func(arr, 20, center=True)
expected = func(np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]
self.assert_numpy_array_equivalent(result, expected)
if test_stable:
result = func(self.arr + 1e9, window)
assert_almost_equal(result[-1],
static_comp(self.arr[-50:] + 1e9))
# Test window larger than array, #7297
if test_window:
if has_min_periods:
for minp in (0, len(self.arr)-1, len(self.arr)):
result = func(self.arr, len(self.arr)+1, min_periods=minp)
expected = func(self.arr, len(self.arr), min_periods=minp)
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask,
np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = func(self.arr, len(self.arr)+1)
expected = func(self.arr, len(self.arr))
nan_mask = np.isnan(result)
self.assertTrue(np.array_equal(nan_mask, np.isnan(expected)))
nan_mask = ~nan_mask
assert_almost_equal(result[nan_mask], expected[nan_mask])
def _check_structures(self, func, static_comp,
has_min_periods=True, has_time_rule=True,
has_center=True,
fill_value=None):
series_result = func(self.series, 50)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, 50)
self.assertEqual(type(frame_result), DataFrame)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
if has_min_periods:
series_result = func(self.series[::2], win, min_periods=minp,
freq='B')
frame_result = func(self.frame[::2], win, min_periods=minp,
freq='B')
else:
series_result = func(self.series[::2], win, freq='B')
frame_result = func(self.frame[::2], win, freq='B')
last_date = series_result.index[-1]
prev_date = last_date - 24 * datetools.bday
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
assert_almost_equal(series_result[-1], static_comp(trunc_series))
assert_almost_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp))
# GH 7925
if has_center:
if has_min_periods:
minp = 10
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25, min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, min_periods=minp,
center=True)
frame_rs = func(self.frame, 25, min_periods=minp,
center=True)
else:
series_xp = func(self.series.reindex(list(self.series.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.series.index)
frame_xp = func(self.frame.reindex(list(self.frame.index)+['x%d'%x for x in range(12)]), 25).shift(-12).reindex(self.frame.index)
series_rs = func(self.series, 25, center=True)
frame_rs = func(self.frame, 25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
assert_series_equal(series_xp, series_rs)
assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(mom.ewma)
arr = np.zeros(1000)
arr[5] = 1
result = mom.ewma(arr, span=100, adjust=False).sum()
self.assertTrue(np.abs(result - 1) < 1e-2)
s = Series([1.0, 2.0, 4.0, 8.0])
expected = Series([1.0, 1.6, 2.736842, 4.923077])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=True),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=True, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
for f in [lambda s: mom.ewma(s, com=2.0, adjust=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=False),
lambda s: mom.ewma(s, com=2.0, adjust=False, ignore_na=True),
]:
result = f(s)
assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [1.] * len(s))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = mom.ewma(s, com=5)
assert_almost_equal(result, [np.nan] * 2 + [1.] * 4)
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
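        # Each tuple below is (series, adjust, ignore_na, expected per-point
        # weights); the EWMA result should equal the cumulative weighted mean
        # of the series under those weights, as computed by simple_wma above.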
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha)**2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha)**2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha), np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha)**3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha)**3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha)**2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha)**3, np.nan, (1. - alpha) * alpha, alpha * ((1. - alpha)**2 + alpha)]),
(s3, False, True, [(1. - alpha)**2, np.nan, (1. - alpha) * alpha, alpha]),
]:
expected = simple_wma(s, Series(w))
result = mom.ewma(s, com=com, adjust=adjust, ignore_na=ignore_na)
assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = mom.ewma(s, com=com, adjust=adjust)
assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(mom.ewmvar)
def test_ewmvol(self):
self._check_ew(mom.ewmvol)
def test_ewma_span_com_args(self):
A = mom.ewma(self.arr, com=9.5)
B = mom.ewma(self.arr, span=20)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ewma_halflife_arg(self):
A = mom.ewma(self.arr, com=13.932726172912965)
B = mom.ewma(self.arr, halflife=10.0)
assert_almost_equal(A, B)
self.assertRaises(Exception, mom.ewma, self.arr, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr, com=9.5, span=20, halflife=50)
self.assertRaises(Exception, mom.ewma, self.arr)
def test_ew_empty_arrays(self):
arr = np.array([], dtype=np.float64)
funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]
for f in funcs:
result = f(arr, 3)
assert_almost_equal(result, arr)
def _check_ew(self, func):
self._check_ew_ndarray(func)
self._check_ew_structures(func)
def _check_ew_ndarray(self, func, preserve_nan=False):
result = func(self.arr, com=10)
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = func(s, 50, min_periods=2)
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
for min_periods in (0, 1):
result = func(s, 50, min_periods=min_periods)
if func == mom.ewma:
self.assertTrue(np.isnan(result.values[:10]).all())
self.assertFalse(np.isnan(result.values[10:]).any())
else:
# ewmstd, ewmvol, ewmvar (with bias=False) require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), 50, min_periods=min_periods)
if func == mom.ewma:
assert_series_equal(result, Series([1.]))
else:
# ewmstd, ewmvol, ewmvar with bias=False require at least two values
assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = func(np.arange(50), span=10)
self.assertEqual(result2.dtype, np.float_)
def _check_ew_structures(self, func):
series_result = func(self.series, com=10)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame, com=10)
self.assertEqual(type(frame_result), DataFrame)
# create the data only once as we do not modify it afterwards
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7., 12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3., 12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=['a', 'b', 99, 'd', 'd']),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel()
return len(set(values[notnull(values)])) == 1
def no_nans(x):
return x.notnull().all().all()
    # each entry is a tuple of (object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [ (x, is_constant(x), no_nans(x)) for x in data ]
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
min_periods,
count, mean, mock_mean, corr,
var_unbiased=None, std_unbiased=None, cov_unbiased=None,
var_biased=None, std_biased=None, cov_biased=None,
var_debiasing_factors=None):
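        # Verify internal consistency of the supplied moment functions on each
        # element of self.data: mean matches mock_mean, a constant series has
        # NaN self-correlation and zero variance, var == cov(x, x) == std**2,
        # biased var == mean(x**2) - mean(x)**2, unbiased var equals biased
        # var times the debiasing factors, and corr/cov of two series are
        # symmetric and satisfy the usual covariance/correlation identities.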
def _non_null_values(x):
values = x.values.ravel()
return set(values[notnull(values)].tolist())
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected)
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = x.max().max()
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
self.assertFalse((var_x < 0).any().any())
self.assertFalse((std_x < 0).any().any())
if cov:
cov_x_x = cov(x, x)
self.assertFalse((cov_x_x < 0).any().any())
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
self.assertFalse((var_x > 0).any().any())
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isnull().equals(y.isnull()):
# can only easily test two Series with similar structure
continue
                    # check that corr(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) - var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) * std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) - mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@slow
def test_ewm_consistency(self):
def _weights(s, com, adjust, ignore_na):
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([ _weights(s.iloc[:, i],
com=com,
adjust=adjust,
ignore_na=ignore_na) for i, _ in enumerate(s.columns) ],
axis=1)
w.index=s.index
w.columns=s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notnull()] = _weights(s[s.notnull()], com=com, adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha, i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
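        # The debiasing factor below generalizes the usual n / (n - 1)
        # correction to weighted observations:
        #     sum(w)**2 / (sum(w)**2 - sum(w**2))
        # which reduces to n / (n - 1) when all weights are equal.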
def _variance_debiasing_factors(s, com, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
def _ewma(s, com, min_periods, adjust, ignore_na):
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method='ffill')
result[mom.expanding_count(s) < (max(min_periods, 1) if min_periods else 1)] = np.nan
return result
com = 3.
for min_periods in [0, 1, 2, 3, 4]:
for adjust in [True, False]:
for ignore_na in [False, True]:
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
mock_mean=lambda x: _ewma(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
corr=lambda x, y: mom.ewmcorr(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na),
var_unbiased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
std_unbiased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
cov_unbiased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=False),
var_biased=lambda x: mom.ewmvar(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
std_biased=lambda x: mom.ewmstd(x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
cov_biased=lambda x, y: mom.ewmcov(x, y, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, bias=True),
var_debiasing_factors=lambda x: _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na))
@slow
def test_expanding_consistency(self):
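        # Each entry is (expanding function, equivalent computation on the raw
        # values, minimum min_periods for which the comparison is valid).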
base_functions = [
(mom.expanding_count, lambda v: Series(v).count(), None),
(mom.expanding_max, lambda v: Series(v).max(), None),
(mom.expanding_min, lambda v: Series(v).min(), None),
(mom.expanding_sum, lambda v: Series(v).sum(), None),
(mom.expanding_mean, lambda v: Series(v).mean(), None),
(mom.expanding_std, lambda v: Series(v).std(), 1),
(mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.expanding_var, lambda v: Series(v).var(), 1),
#(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.expanding_median, lambda v: Series(v).median(), None),
(mom.expanding_max, np.nanmax, 1),
(mom.expanding_min, np.nanmin, 1),
(mom.expanding_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.expanding_mean, np.nanmean, 1),
(mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.expanding_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.expanding_max, np.max, None),
(mom.expanding_min, np.min, None),
(mom.expanding_sum, np.sum, None),
(mom.expanding_mean, np.mean, None),
(mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
(mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
(mom.expanding_median, np.median, None),
]
# suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning)
for min_periods in [0, 1, 2, 3, 4]:
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=mom.expanding_count,
mean=lambda x: mom.expanding_mean(x, min_periods=min_periods),
mock_mean=lambda x: mom.expanding_sum(x, min_periods=min_periods) / mom.expanding_count(x),
corr=lambda x, y: mom.expanding_corr(x, y, min_periods=min_periods),
var_unbiased=lambda x: mom.expanding_var(x, min_periods=min_periods),
std_unbiased=lambda x: mom.expanding_std(x, min_periods=min_periods),
cov_unbiased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods),
var_biased=lambda x: mom.expanding_var(x, min_periods=min_periods, ddof=0),
std_biased=lambda x: mom.expanding_std(x, min_periods=min_periods, ddof=0),
cov_biased=lambda x, y: mom.expanding_cov(x, y, min_periods=min_periods, ddof=0),
var_debiasing_factors=lambda x: mom.expanding_count(x) / (mom.expanding_count(x) - 1.).replace(0., np.nan)
)
# test consistency between expanding_xyz() and either (a) expanding_apply of Series.xyz(),
# or (b) expanding_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (expanding_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if expanding_f is mom.expanding_count:
expanding_f_result = expanding_f(x)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=0)
else:
if expanding_f in [mom.expanding_cov, mom.expanding_corr]:
expanding_f_result = expanding_f(x, min_periods=min_periods, pairwise=False)
else:
expanding_f_result = expanding_f(x, min_periods=min_periods)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
assert_equal(expanding_f_result, expanding_apply_f_result)
if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
# test pairwise=True
expanding_f_result = expanding_f(x, x, min_periods=min_periods, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = expanding_f(x.iloc[:, i], x.iloc[:, j], min_periods=min_periods)
assert_panel_equal(expanding_f_result, expected)
@slow
def test_rolling_consistency(self):
base_functions = [
(mom.rolling_count, lambda v: Series(v).count(), None),
(mom.rolling_max, lambda v: Series(v).max(), None),
(mom.rolling_min, lambda v: Series(v).min(), None),
(mom.rolling_sum, lambda v: Series(v).sum(), None),
(mom.rolling_mean, lambda v: Series(v).mean(), None),
(mom.rolling_std, lambda v: Series(v).std(), 1),
(mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
(mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
(mom.rolling_var, lambda v: Series(v).var(), 1),
#(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
#(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
#(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
# lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
(mom.rolling_median, lambda v: Series(v).median(), None),
(mom.rolling_max, np.nanmax, 1),
(mom.rolling_min, np.nanmin, 1),
(mom.rolling_sum, np.nansum, 1),
]
if np.__version__ >= LooseVersion('1.8.0'):
base_functions += [
(mom.rolling_mean, np.nanmean, 1),
(mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
]
if np.__version__ >= LooseVersion('1.9.0'):
base_functions += [
(mom.rolling_median, np.nanmedian, 1),
]
no_nan_functions = [
(mom.rolling_max, np.max, None),
(mom.rolling_min, np.min, None),
(mom.rolling_sum, np.sum, None),
(mom.rolling_mean, np.mean, None),
(mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
(mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
(mom.rolling_median, np.median, None),
]
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
for center in [False, True]:
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: mom.rolling_count(x, window=window, center=center),
mean=lambda x: mom.rolling_mean(x, window=window, min_periods=min_periods, center=center),
mock_mean=lambda x: mom.rolling_sum(x, window=window, min_periods=min_periods, center=center).divide(
mom.rolling_count(x, window=window, center=center)),
corr=lambda x, y: mom.rolling_corr(x, y, window=window, min_periods=min_periods, center=center),
var_unbiased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center),
std_unbiased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center),
cov_unbiased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center),
var_biased=lambda x: mom.rolling_var(x, window=window, min_periods=min_periods, center=center, ddof=0),
std_biased=lambda x: mom.rolling_std(x, window=window, min_periods=min_periods, center=center, ddof=0),
cov_biased=lambda x, y: mom.rolling_cov(x, y, window=window, min_periods=min_periods, center=center, ddof=0),
var_debiasing_factors=lambda x: mom.rolling_count(x, window=window, center=center).divide(
(mom.rolling_count(x, window=window, center=center) - 1.).replace(0., np.nan)),
)
# test consistency between rolling_xyz() and either (a) rolling_apply of Series.xyz(),
# or (b) rolling_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
functions = base_functions
# GH 8269
if no_nans:
functions = base_functions + no_nan_functions
for (rolling_f, f, require_min_periods) in functions:
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
if rolling_f is mom.rolling_count:
rolling_f_result = rolling_f(x, window=window, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=0, center=center)
else:
if rolling_f in [mom.rolling_cov, mom.rolling_corr]:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center, pairwise=False)
else:
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=min_periods, center=center)
assert_equal(rolling_f_result, rolling_apply_f_result)
if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
# test pairwise=True
rolling_f_result = rolling_f(x, x, window=window, min_periods=min_periods,
center=center, pairwise=True)
expected = Panel(items=x.index, major_axis=x.columns, minor_axis=x.columns)
for i, _ in enumerate(x.columns):
for j, _ in enumerate(x.columns):
expected.iloc[:, i, j] = rolling_f(x.iloc[:, i], x.iloc[:, j],
window=window, min_periods=min_periods, center=center)
assert_panel_equal(rolling_f_result, expected)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_cov(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment(mom.rolling_cov, 10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = mom.rolling_corr(A, B, 50, min_periods=25)
assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = mom.rolling_corr(a, b, len(a), min_periods=1)
assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment(mom.rolling_corr, 10, min_periods=5)
def _check_pairwise_moment(self, func, *args, **kwargs):
panel = func(self.frame, *args, **kwargs)
actual = panel.ix[:, 1, 5]
expected = func(self.frame[1], self.frame[5], *args, **kwargs)
tm.assert_series_equal(actual, expected)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
self.assertRaises(TypeError, mom._flex_binary_moment,5,6,None)
def test_corr_sanity(self):
#GH 3155
df = DataFrame(
np.array(
[[ 0.87024726, 0.18505595],
[ 0.64355431, 0.3091617 ],
[ 0.92372966, 0.50552513],
[ 0.00203756, 0.04520709],
[ 0.84780328, 0.33394331],
[ 0.78369152, 0.63919667]])
)
res = mom.rolling_corr(df[0],df[1],5,center=True)
self.assertTrue(all([np.abs(np.nan_to_num(x)) <=1 for x in res]))
# and some fuzzing
for i in range(10):
df = DataFrame(np.random.rand(30,2))
res = mom.rolling_corr(df[0],df[1],5,center=True)
            try:
                self.assertTrue(all([np.abs(np.nan_to_num(x)) <= 1 for x in res]))
            except AssertionError:
                print(res)
                raise
def test_flex_binary_frame(self):
def _check(method):
series = self.frame[1]
res = method(series, self.frame, 10)
res2 = method(self.frame, series, 10)
exp = self.frame.apply(lambda x: method(series, x, 10))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = method(self.frame, frame2, 10)
exp = DataFrame(dict((k, method(self.frame[k], frame2[k], 10))
for k in self.frame))
tm.assert_frame_equal(res3, exp)
methods = [mom.rolling_corr, mom.rolling_cov]
for meth in methods:
_check(meth)
def test_ewmcov(self):
self._check_binary_ew(mom.ewmcov)
def test_ewmcov_pairwise(self):
self._check_pairwise_moment(mom.ewmcov, span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew(mom.ewmcorr)
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment(mom.ewmcorr, span=10, min_periods=5)
def _check_binary_ew(self, func):
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
self.assertTrue(np.isnan(result.values[:14]).all())
self.assertFalse(np.isnan(result.values[14:]).any())
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at least two values
self.assertTrue(np.isnan(result.values[:11]).all())
self.assertFalse(np.isnan(result.values[11:]).any())
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
assert_series_equal(result, Series([]))
# check series of length 1
result = func(Series([1.]), Series([1.]), 50, min_periods=min_periods)
assert_series_equal(result, Series([np.NaN]))
self.assertRaises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply(self):
ser = Series([])
assert_series_equal(ser, mom.expanding_apply(ser, lambda x: x.mean()))
def expanding_mean(x, min_periods=1, freq=None):
return mom.expanding_apply(x,
lambda x: x.mean(),
min_periods=min_periods,
freq=freq)
self._check_expanding(expanding_mean, np.mean)
# GH 8080
s = Series([None, None, None])
result = mom.expanding_apply(s, lambda x: len(x), min_periods=0)
expected = Series([1., 2., 3.])
assert_series_equal(result, expected)
def test_expanding_apply_args_kwargs(self):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = mom.expanding_apply(df, np.mean) + 20.
assert_frame_equal(mom.expanding_apply(df, mean_w_arg, args=(20,)),
expected)
assert_frame_equal(mom.expanding_apply(df, mean_w_arg,
kwargs={'const' : 20}),
expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = mom.expanding_corr(A, B)
rolling_result = mom.rolling_corr(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = mom.expanding_count(self.series)
assert_almost_equal(result, mom.rolling_count(self.series,
len(self.series)))
def test_expanding_quantile(self):
result = mom.expanding_quantile(self.series, 0.5)
rolling_result = mom.rolling_quantile(self.series,
len(self.series),
0.5, min_periods=1)
assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = mom.expanding_cov(A, B)
rolling_result = mom.rolling_cov(A, B, len(A), min_periods=1)
assert_almost_equal(rolling_result, result)
def test_expanding_max(self):
self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)
def test_expanding_cov_pairwise(self):
result = mom.expanding_cov(self.frame)
rolling_result = mom.rolling_cov(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_corr_pairwise(self):
result = mom.expanding_corr(self.frame)
rolling_result = mom.rolling_corr(self.frame, len(self.frame),
min_periods=1)
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_cov(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_cov(s1, s2)
expected = Series([None, None, None, 4.5])
assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.expanding_corr(s1, s2a)
assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = mom.expanding_corr(s1, s2)
expected = Series([None, None, None, 1.])
assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_cov(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 2.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_cov(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = mom.rolling_corr(s1, s2, window=3, min_periods=2)
expected = Series([None, None, 1.0])
assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = mom.rolling_corr(s1, s2a, window=3, min_periods=2)
assert_series_equal(result, expected)
def test_rolling_functions_window_non_shrinkage(self):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1,5], [3, 2], [3,9], [-1,0]], columns=['A','B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
df_expected_panel = Panel(items=df.index, major_axis=df.columns, minor_axis=df.columns)
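        # A window larger than the input (with min_periods exceeding its
        # length) must not shrink the result: we expect output of the original
        # shape, filled with NaN.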
functions = [lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df_result = f(df)
assert_frame_equal(df_result, df_expected)
except (ImportError):
# scipy needed for rolling_window
continue
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                     # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
assert_panel_equal(df_result_panel, df_expected_panel)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns)
df2 = DataFrame(columns=['a'])
df2_expected = df2
df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns)
functions = [lambda x: mom.expanding_count(x),
lambda x: mom.expanding_cov(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=False, min_periods=5),
lambda x: mom.expanding_max(x, min_periods=5),
lambda x: mom.expanding_min(x, min_periods=5),
lambda x: mom.expanding_sum(x, min_periods=5),
lambda x: mom.expanding_mean(x, min_periods=5),
lambda x: mom.expanding_std(x, min_periods=5),
lambda x: mom.expanding_var(x, min_periods=5),
lambda x: mom.expanding_skew(x, min_periods=5),
lambda x: mom.expanding_kurt(x, min_periods=5),
lambda x: mom.expanding_quantile(x, quantile=0.5, min_periods=5),
lambda x: mom.expanding_median(x, min_periods=5),
lambda x: mom.expanding_apply(x, func=sum, min_periods=5),
lambda x: mom.rolling_count(x, window=10),
lambda x: mom.rolling_cov(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=False, window=10, min_periods=5),
lambda x: mom.rolling_max(x, window=10, min_periods=5),
lambda x: mom.rolling_min(x, window=10, min_periods=5),
lambda x: mom.rolling_sum(x, window=10, min_periods=5),
lambda x: mom.rolling_mean(x, window=10, min_periods=5),
lambda x: mom.rolling_std(x, window=10, min_periods=5),
lambda x: mom.rolling_var(x, window=10, min_periods=5),
lambda x: mom.rolling_skew(x, window=10, min_periods=5),
lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
lambda x: mom.rolling_quantile(x, quantile=0.5, window=10, min_periods=5),
lambda x: mom.rolling_median(x, window=10, min_periods=5),
lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
lambda x: mom.rolling_window(x, win_type='boxcar', window=10, min_periods=5),
]
for f in functions:
try:
s_result = f(s)
assert_series_equal(s_result, s_expected)
df1_result = f(df1)
assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
functions = [lambda x: mom.expanding_cov(x, x, pairwise=True, min_periods=5),
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
                     # rolling_corr_pairwise is deprecated, so the following line should be deleted
# when rolling_corr_pairwise is removed.
lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
assert_panel_equal(df1_result_panel, df1_expected_panel)
df2_result_panel = f(df2)
assert_panel_equal(df2_result_panel, df2_expected_panel)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,5], [3, 2], [3,9]], columns=['A','B'])
df1a = DataFrame([[1,5], [3,9]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_cov(df1, df2, pairwise=True)[2]
result2 = mom.expanding_cov(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_cov(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_cov(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-3., -5.], [-6., -10.]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1,2], [3, 2], [3,4]], columns=['A','B'])
df1a = DataFrame([[1,2], [3,4]], index=[0,2], columns=['A','B'])
df2 = DataFrame([[5,6], [None,None], [2,1]], columns=['X','Y'])
df2a = DataFrame([[5,6], [2,1]], index=[0,2], columns=['X','Y'])
result1 = mom.expanding_corr(df1, df2, pairwise=True)[2]
result2 = mom.expanding_corr(df1, df2a, pairwise=True)[2]
result3 = mom.expanding_corr(df1a, df2, pairwise=True)[2]
result4 = mom.expanding_corr(df1a, df2a, pairwise=True)[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]], index=['A','B'], columns=['X','Y'])
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
assert_frame_equal(result4, expected)
def test_pairwise_stats_column_names_order(self):
# GH 7738
df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]),
DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]),
DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]),
DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]),
DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']),
]
df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X'])
s = Series([1,1,3,8])
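        # For each statistic below we check that the result keeps the original
        # index/columns in order and that the underlying values do not depend
        # on the column labels.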
# suppress warnings about incomparable objects, as we are deliberately testing with such column labels
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*incomparable objects.*", category=RuntimeWarning)
# DataFrame methods (which do not call _flex_binary_moment())
for f in [lambda x: x.cov(),
lambda x: x.corr(),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.columns)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=True
for f in [lambda x: mom.expanding_cov(x, pairwise=True),
lambda x: mom.expanding_corr(x, pairwise=True),
lambda x: mom.rolling_cov(x, window=3, pairwise=True),
lambda x: mom.rolling_corr(x, window=3, pairwise=True),
lambda x: mom.ewmcov(x, com=3, pairwise=True),
lambda x: mom.ewmcorr(x, com=3, pairwise=True),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with itself, pairwise=False
for f in [lambda x: mom.expanding_cov(x, pairwise=False),
lambda x: mom.expanding_corr(x, pairwise=False),
lambda x: mom.rolling_cov(x, window=3, pairwise=False),
lambda x: mom.rolling_corr(x, window=3, pairwise=False),
lambda x: mom.ewmcov(x, com=3, pairwise=False),
lambda x: mom.ewmcorr(x, com=3, pairwise=False),
]:
results = [f(df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=True
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True),
lambda x, y: mom.expanding_corr(x, y, pairwise=True),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True),
]:
results = [f(df, df2) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.items, df.index)
assert_index_equal(result.major_axis, df.columns)
assert_index_equal(result.minor_axis, df2.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
# DataFrame with another DataFrame, pairwise=False
for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False),
lambda x, y: mom.expanding_corr(x, y, pairwise=False),
lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False),
lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=False),
lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False),
lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False),
]:
results = [f(df, df2) if df.columns.is_unique else None for df in df1s]
for (df, result) in zip(df1s, results):
if result is not None:
expected_index = df.index.union(df2.index)
expected_columns = df.columns.union(df2.columns)
assert_index_equal(result.index, expected_index)
assert_index_equal(result.columns, expected_columns)
else:
tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2)
tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df)
# DataFrame with a Series
for f in [lambda x, y: mom.expanding_cov(x, y),
lambda x, y: mom.expanding_corr(x, y),
lambda x, y: mom.rolling_cov(x, y, window=3),
lambda x, y: mom.rolling_corr(x, y, window=3),
lambda x, y: mom.ewmcov(x, y, com=3),
lambda x, y: mom.ewmcorr(x, y, com=3),
]:
results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s]
for (df, result) in zip(df1s, results):
assert_index_equal(result.index, df.index)
assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.assert_numpy_array_equivalent(result, results[0])
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_skew(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_skew(d, window=2)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
0.177994, 1.548824])
x = mom.rolling_skew(d, window=4)
assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = mom.rolling_kurt(d, window=5)
assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = mom.rolling_kurt(d, window=3)
assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039 , 0.19501095,
1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN,
1.224307, 2.671499])
x = mom.rolling_kurt(d, window=4)
assert_series_equal(expected, x)
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
assert_almost_equal(result[10],
static_comp(self.arr[:11]))
if preserve_nan:
assert(np.isnan(result[self._nan_locs]).all())
arr = randn(50)
if has_min_periods:
result = func(arr, min_periods=30)
assert(np.isnan(result[:29]).all())
assert_almost_equal(result[-1], static_comp(arr[:50]))
# min_periods is working correctly
result = func(arr, min_periods=15)
self.assertTrue(np.isnan(result[13]))
self.assertFalse(np.isnan(result[14]))
arr2 = randn(20)
result = func(arr2, min_periods=5)
self.assertTrue(isnull(result[3]))
self.assertTrue(notnull(result[4]))
# min_periods=0
result0 = func(arr, min_periods=0)
result1 = func(arr, min_periods=1)
assert_almost_equal(result0, result1)
else:
result = func(arr)
assert_almost_equal(result[-1], static_comp(arr[:50]))
def _check_expanding_structures(self, func):
series_result = func(self.series)
tm.assert_isinstance(series_result, Series)
frame_result = func(self.frame)
self.assertEqual(type(frame_result), DataFrame)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True,
preserve_nan=True):
self._check_expanding_ndarray(func, static_comp,
has_min_periods=has_min_periods,
has_time_rule=has_time_rule,
preserve_nan=preserve_nan)
self._check_expanding_structures(func)
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_max_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D')
assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='median')
assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0+10.0+20.0)/3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_max(series, window=1, freq='D', how='mean')
assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_min(series, window=1, freq='D')
assert_series_equal(expected, x)
def test_rolling_median_how_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0)
for i in range(1, 6)])
x = mom.rolling_median(series, window=1, freq='D')
assert_series_equal(expected, x)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
enigmampc/catalyst | catalyst/examples/marketplace/mean_reversion_by_marketcap.py | 1 | 8511 | # For this example, we're going to write a simple mean-reversion script. When
# the RSI says an asset is oversold, we're going to buy; when it says the asset
# is overbought, we're going to sell. Hopefully we'll catch the reversals.
import os
import tempfile
import time
import pandas as pd
import talib
from logbook import Logger
from catalyst import run_algorithm
from catalyst.api import symbol, record, order_target_percent, get_dataset
from catalyst.exchange.utils.stats_utils import set_print_settings, \
get_pretty_stats
# We give a name to the algorithm which Catalyst will use to persist its state.
# In this example, Catalyst will create the `.catalyst/data/live_algos`
# directory. If we stop and start the algorithm, Catalyst will resume its
# state using the files included in the folder.
from catalyst.utils.paths import ensure_directory
NAMESPACE = 'mean_reversion_simple'
log = Logger(NAMESPACE)
# To run an algorithm in Catalyst, you need two functions: initialize and
# handle_data.
def initialize(context):
# This initialize function sets any data or variables that you'll use in
# your algorithm. For instance, you'll want to define the trading pair (or
# trading pairs) you want to backtest. You'll also want to define any
# parameters or values you're going to use.
    # In our example, we pick low-market-cap coins and trade them against Ether.
df = get_dataset('testmarketcap2') # type: pd.DataFrame
# Picking a specific date in our DataFrame
first_dt = df.index.get_level_values(0)[0]
# Since we use a MultiIndex with date / symbol, picking a date will
# result in a new DataFrame for the selected date with a single
# symbol index
df = df.xs(first_dt, level=0)
# Keep only the top coins by market cap
df = df.loc[df['market_cap_usd'].isin(df['market_cap_usd'].nlargest(100))]
set_print_settings()
df.sort_values(by=['market_cap_usd'], ascending=True, inplace=True)
print('the marketplace data:\n{}'.format(df))
    # Pick the 5 assets with the lowest market cap (among the top 100) for trading
quote_currency = 'eth'
exchange = context.exchanges[next(iter(context.exchanges))]
symbols = [a.symbol for a in exchange.assets
if a.start_date < context.datetime]
context.assets = []
for currency, price in df['market_cap_usd'].iteritems():
if len(context.assets) >= 5:
break
s = '{}_{}'.format(currency.decode('utf-8'), quote_currency)
if s in symbols:
context.assets.append(symbol(s))
context.base_price = None
context.current_day = None
context.RSI_OVERSOLD = 55
context.RSI_OVERBOUGHT = 60
context.CANDLE_SIZE = '5T'
context.start_time = time.time()
def handle_data(context, data):
# This handle_data function is where the real work is done. Our data is
# minute-level tick data, and each minute is called a frame. This function
# runs on each frame of the data.
# We flag the first period of each day.
# Since cryptocurrencies trade 24/7 the `before_trading_starts` handle
# would only execute once. This method works with minute and daily
# frequencies.
today = data.current_dt.floor('1D')
if today != context.current_day:
context.traded_today = dict()
context.current_day = today
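        # Resetting traded_today here makes the one-trade-per-asset limit
        # below apply per calendar day.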
# Preparing dictionaries for asset-level data points
volumes = dict()
rsis = dict()
price_values = dict()
cash = context.portfolio.cash
for asset in context.assets:
        # We request a price history for the asset defined above in the
        # context.assets variable. For this example, we fetch the last 50
        # closing prices sampled at the CANDLE_SIZE frequency (5-minute bars).
        # The frequency attribute determines the bar size. We use this
# convention for the frequency alias:
# http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
prices = data.history(
asset,
fields='close',
bar_count=50,
frequency=context.CANDLE_SIZE
)
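        # 50 bars at the 5-minute candle size cover roughly four hours of
        # history, which is ample for the 14-period RSI computed below.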
        # Ta-lib calculates various technical indicators based on price and
        # volume arrays.
        # In this example, we are computing the 14-period RSI from the closing prices.
rsi = talib.RSI(prices.values, timeperiod=14)
        # We need the current price and volume of the security. Since we are
        # requesting two fields, data.current() returns both, and we pick out
        # the closing price below.
current = data.current(asset, fields=['close', 'volume'])
price = current['close']
# If base_price is not set, we use the current value. This is the
# price at the first bar which we reference to calculate price_change.
# if asset not in context.base_price:
# context.base_price[asset] = price
#
# base_price = context.base_price[asset]
# price_change = (price - base_price) / base_price
# Tracking the relevant data
volumes[asset] = current['volume']
rsis[asset] = rsi[-1]
price_values[asset] = price
# price_changes[asset] = price_change
# We are trying to avoid over-trading by limiting our trades to
# one per day.
if asset in context.traded_today:
continue
# Exit if we cannot trade
if not data.can_trade(asset):
continue
# Another powerful built-in feature of the Catalyst backtester is the
# portfolio object. The portfolio object tracks your positions, cash,
# cost basis of specific holdings, and more. In this line, we
# calculate how long or short our position is at this minute.
pos_amount = context.portfolio.positions[asset].amount
if rsi[-1] <= context.RSI_OVERSOLD and pos_amount == 0:
log.info(
'{}: buying - price: {}, rsi: {}'.format(
data.current_dt, price, rsi[-1]
)
)
            # Buy with a limit price slightly above the current price.
limit_price = price * 1.005
target = 1.0 / len(context.assets)
order_target_percent(
asset, target, limit_price=limit_price
)
context.traded_today[asset] = True
elif rsi[-1] >= context.RSI_OVERBOUGHT and pos_amount > 0:
log.info(
'{}: selling - price: {}, rsi: {}'.format(
data.current_dt, price, rsi[-1]
)
)
limit_price = price * 0.995
order_target_percent(
asset, 0, limit_price=limit_price
)
context.traded_today[asset] = True
# Now that we've collected all current data for this frame, we use
# the record() method to save it. This data will be available as
# a parameter of the analyze() function for further analysis.
record(
current_price=price_values,
volume=volumes,
rsi=rsis,
cash=cash,
)
def analyze(context=None, perf=None):
stats = get_pretty_stats(perf)
print('the algo stats:\n{}'.format(stats))
pass
if __name__ == '__main__':
# The execution mode: backtest or live
live = False
if live:
run_algorithm(
capital_base=0.1,
initialize=initialize,
handle_data=handle_data,
analyze=analyze,
exchange_name='poloniex',
live=True,
algo_namespace=NAMESPACE,
quote_currency='btc',
live_graph=False,
simulate_orders=False,
stats_output=None,
)
else:
folder = os.path.join(
tempfile.gettempdir(), 'catalyst', NAMESPACE
)
ensure_directory(folder)
timestr = time.strftime('%Y%m%d-%H%M%S')
out = os.path.join(folder, '{}.p'.format(timestr))
# catalyst run -f catalyst/examples/mean_reversion_simple.py \
# -x bitfinex -s 2017-10-1 -e 2017-11-10 -c usdt -n mean-reversion \
# --data-frequency minute --capital-base 10000
run_algorithm(
capital_base=100,
data_frequency='minute',
initialize=initialize,
handle_data=handle_data,
analyze=analyze,
exchange_name='poloniex',
algo_namespace=NAMESPACE,
quote_currency='eth',
start=pd.to_datetime('2017-10-01', utc=True),
end=pd.to_datetime('2017-10-15', utc=True),
)
log.info('saved perf stats: {}'.format(out))
| apache-2.0 |
Silmathoron/nest-simulator | pynest/examples/spatial/hill_tononi_Vp.py | 6 | 38623 | # -*- coding: utf-8 -*-
#
# hill_tononi_Vp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NEST spatial example: A case-based tutorial
===========================================
:Author: Hans Ekkehard Plesser
:Institution: Norwegian University of Life Sciences
:Version: 0.4
:Date: 21 November 2012
:Copyright: The NEST Initiative (2004)
:License: Creative Commons Attribution License
**NOTE:** The network generated by this script produces dynamics
in which the activity of the entire system, especially of Rp and
Vp, oscillates at approximately 5 Hz. This is different from the
full model. Deviations are due to the different model type and
the elimination of a number of connections, with no changes to
the weights.
Introduction
-------------
This tutorial shows you how to implement a simplified version of the
Hill-Tononi model of the early visual pathway using NEST. The model
is described in the paper
S. L. Hill and G. Tononi.
Modeling Sleep and Wakefulness in the Thalamocortical System.
J Neurophysiology **93**:1671-1698 (2005).
Freely available via `doi 10.1152/jn.00915.2004
<http://dx.doi.org/10.1152/jn.00915.2004>`_.
We simplify the model somewhat both to keep this tutorial a bit
shorter, and because some details of the Hill-Tononi model are not
currently supported by NEST. Simplifications include:
1. We use the ``iaf_cond_alpha`` neuron model, which is
simpler than the Hill-Tononi model.
#. As the ``iaf_cond_alpha`` neuron model only supports two
synapses (labeled "ex" and "in"), we only include AMPA and
GABA_A synapses.
#. We ignore the secondary pathway (Ts, Rs, Vs), since it adds just
more of the same from a technical point of view.
#. Synaptic delays follow a Gaussian distribution in the HT
model. This actually implies a Gaussian distribution clipped at
some small, non-zero delay, since delays must be
positive. Currently, there is a bug in the module when using a clipped
Gaussian distribution. We therefore draw delays from a
uniform distribution.
#. Some further adaptations are given at the appropriate locations in
the script.
This tutorial is divided into the following sections:
Philosophy_
Discusses the philosophy applied to model implementation in this
tutorial
Preparations_
Necessary steps to use NEST
`Configurable Parameters`_
Define adjustable network parameters
`Neuron Models`_
Define the neuron models needed by the network model
Populations_
Create Populations
`Synapse models`_
Define the synapse models used in the network model
Connections_
Create Connections
`Example simulation`_
Perform a small simulation for illustration. This
section also discusses the setup for recording.
Philosophy
-----------
A network model has two essential components: *populations* and
*projections*. We first use NEST's ``CopyModel()`` mechanism to
create specific models for all populations and subpopulations in
the network, and then create the populations using the
``Create()`` function.
We use a two-stage process to create the connections, mainly
because the same configurations are required for a number of
projections: we first define dictionaries specifying the
connections, then apply these dictionaries later.
The way in which we declare the network model here is an
example. You should not consider it the last word: we expect to see
a significant development in strategies and tools for network
descriptions in the future. The following contributions to CNS*09
seem particularly interesting
- Ralf Ansorg & Lars Schwabe. Declarative model description and
code generation for hybrid individual- and population-based
simulations of the early visual system (P57);
- Sharon Crook, R. Angus Silver, & Padraig Gleeson. Describing
and exchanging models of neurons and neuronal networks with
NeuroML (F1);
as well as the following paper, which will appear in PLoS
Computational Biology shortly:
- Eilen Nordlie, Marc-Oliver Gewaltig, & Hans Ekkehard Plesser.
Towards reproducible descriptions of neuronal network models.
Preparations
-------------
Please make sure that your ``PYTHONPATH`` is set correctly, so
that Python can find the NEST Python module.
**Note:** By default, the script does not show any graphics.
Set ``SHOW_FIGURES`` to ``True`` to activate graphics.
"""
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
SHOW_FIGURES = False
if SHOW_FIGURES:
plt.ion()
else:
plt_show = plt.show
def nop(s=None, block=None):
pass
plt.show = nop
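# Replacing plt.show with the no-op above lets the script run headless:
# any later show() calls are silently ignored when SHOW_FIGURES is False.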
##############################################################################
# Load pynest
import nest
# Make sure we start with a clean slate, even if we re-run the script
# in the same Python session.
nest.ResetKernel()
# Import math, we need Pi
import math
##############################################################################
# Configurable Parameters
# -----------------------
#
# Here we define those parameters that we take to be
# configurable. The choice of configurable parameters is obviously
# arbitrary, and in practice one would have far more configurable
# parameters. We restrict ourselves to:
#
# - Network size in neurons ``N``, each layer is ``N x N``.
# - Network size in subtended visual angle ``visSize``, in degree.
# - Temporal frequency of drifting grating input ``f_dg``, in Hz.
# - Spatial wavelength and direction of drifting grating input,
# ``lambda_dg`` and ``phi_dg``, in degree/radian.
# - Background firing rate of retinal nodes and modulation amplitude,
# ``retDC`` and ``retAC``, in Hz.
# - Simulation duration ``simtime``; actual simulation is split into
# intervals of ``sim_interval`` length, so that the network state
# can be visualized in those intervals. Times are in ms.
# - Periodic boundary conditions, ``edge_wrap``.
Params = {'N': 40,
'visSize': 8.0,
'f_dg': 2.0,
'lambda_dg': 2.0,
'phi_dg': 0.0,
'retDC': 30.0,
'retAC': 30.0,
'simtime': 100.0,
'sim_interval': 1.0,
'edge_wrap': True
}
##############################################################################
# Neuron Models
# -------------
#
# We declare models in two steps:
#
# 1. We define a dictionary specifying the NEST neuron model to use
# as well as the parameters for that model.
# #. We create three copies of this dictionary with parameters
# adjusted to the three model variants specified in Table~2 of
# Hill & Tononi (2005) (cortical excitatory, cortical inhibitory,
# thalamic)
#
# In addition, we declare the models for the stimulation and
# recording devices.
#
# The general neuron model
# ------------------------
#
# We use the ``iaf_cond_alpha`` neuron, which is an
# integrate-and-fire neuron with two conductance-based synapses which
# have alpha-function time course. Any input with positive weights
# will automatically be directed to the synapse labeled ``_ex``, any
# with negative weights to the synapse labeled ``_in``. We define
# **all** parameters explicitly here, so that no information is
# hidden in the model definition in NEST. ``V_m`` is the membrane
# potential to which the model neurons will be initialized.
# The model equations and parameters for the Hill-Tononi neuron model
# are given on pp. 1677f and Tables 2 and 3 in that paper. Note some
# peculiarities and adjustments:
#
# - Hill & Tononi specify their model in terms of the membrane time
# constant, while the ``iaf_cond_alpha`` model is based on the
# membrane capacitance. Interestingly, conductances are unitless in
# the H&T model. We thus can use the time constant directly as
# membrane capacitance.
# - The model includes sodium and potassium leak conductances. We
# combine these into a single one as follows:
#
# .. math::
#
# -g_{NaL}(V-E_{Na}) - g_{KL}(V-E_K)
# = -(g_{NaL}+g_{KL})
#   \left(V-\frac{g_{NaL}E_{Na}+g_{KL}E_K}{g_{NaL}+g_{KL}}\right)
#
# - We write the resulting expressions for g_L and E_L explicitly
# below, to avoid errors in copying from our pocket calculator.
# - The paper gives a range of 1.0-1.85 for g_{KL}, we choose 1.5
# here.
# - The Hill-Tononi model has no explicit reset or refractory
# time. We arbitrarily set V_reset and t_ref.
# - The paper uses double exponential time courses for the synaptic
# conductances, with separate time constants for the rising and
# falling flanks. Alpha functions have only a single time
# constant: we use twice the rising time constant given by Hill and
# Tononi.
# - In the general model below, we use the values for the cortical
# excitatory cells as defaults. Values will then be adapted below.
#
nest.CopyModel('iaf_cond_alpha', 'NeuronModel',
params={'C_m': 16.0,
'E_L': (0.2 * 30.0 + 1.5 * -90.0) / (0.2 + 1.5),
'g_L': 0.2 + 1.5,
'E_ex': 0.0,
'E_in': -70.0,
'V_reset': -60.0,
'V_th': -51.0,
't_ref': 2.0,
'tau_syn_ex': 1.0,
'tau_syn_in': 2.0,
'I_e': 0.0,
'V_m': -70.0})
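# As a quick sanity check of the combined leak parameters above (simple
# arithmetic, for orientation only): with g_NaL = 0.2, E_Na = 30 mV,
# g_KL = 1.5 and E_K = -90 mV we get g_L = 1.7 and
# E_L = (0.2 * 30.0 + 1.5 * -90.0) / 1.7, i.e. roughly -75.9 mV, which lies
# between E_K and E_Na, closer to E_K because the potassium leak dominates.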
##############################################################################
# Adaptation of models for different populations
# ----------------------------------------------
#
# We must copy the `NeuronModel` dictionary explicitly, otherwise
# Python would just create a reference.
#
# Cortical excitatory cells
# .........................
# Parameters are the same as above, so we need not adapt anything
nest.CopyModel('NeuronModel', 'CtxExNeuron')
# Cortical inhibitory cells
# .........................
nest.CopyModel('NeuronModel', 'CtxInNeuron',
params={'C_m': 8.0,
'V_th': -53.0,
't_ref': 1.0})
# Thalamic cells
# ..............
nest.CopyModel('NeuronModel', 'ThalamicNeuron',
params={'C_m': 8.0,
'V_th': -53.0,
't_ref': 1.0,
'E_in': -80.0})
##############################################################################
# Input generating nodes
# ----------------------
#
# Input is generated by sinusoidally modulated Poisson generators,
# organized in a square layer of retina nodes. These nodes require a
# slightly more complicated initialization than all other elements of
# the network:
#
# - Average firing rate ``rate``, firing rate modulation depth ``amplitude``,
# and temporal modulation frequency ``frequency`` are the same for all
# retinal nodes and are set directly below.
# - The temporal phase ``phase`` of each node depends on its position in
# the grating and can only be assigned after the retinal layer has
# been created.
nest.CopyModel('sinusoidal_poisson_generator', 'RetinaNode',
params={'amplitude': Params['retAC'],
'rate': Params['retDC'],
'frequency': Params['f_dg'],
'phase': 0.0,
'individual_spike_trains': False})
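# Note that with 'individual_spike_trains' set to False each retina node
# emits a single spike train that is shared by all of its targets, rather
# than an independent Poisson realization per connection.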
##############################################################################
# Recording nodes
# ---------------
#
# We use the ``multimeter`` device for recording from the model
# neurons. At present, ``iaf_cond_alpha`` is one of the few models
# supporting ``multimeter`` recording. Support for more models will
# be added soon; until then, you need to use ``voltmeter`` to record
# from other models.
#
# We configure the multimeter to record the membrane potential at
# certain intervals to memory only. We record the node ID of
# the recorded neurons, but not the time.
nest.CopyModel('multimeter', 'RecordingNode',
params={'interval': Params['sim_interval'],
'record_from': ['V_m'],
'record_to': 'memory'})
##############################################################################
# Populations
# -----------
#
# We now create the neuron populations in the model. We define
# them in order from eye via thalamus to cortex.
#
# We first define a spatial grid defining common positions and
# parameters for all populations
layerGrid = nest.spatial.grid(shape=[Params['N'], Params['N']],
extent=[Params['visSize'], Params['visSize']],
edge_wrap=Params['edge_wrap'])
# We can pass this object to the ``positions`` argument in ``Create``
# to define the positions of the neurons.
##############################################################################
# Retina
# ------
retina = nest.Create('RetinaNode', positions=layerGrid)
# Now set phases of retinal oscillators; we create a Parameter
# which represents the phase based on the spatial properties of
# the neuron.
retina_phase = 360.0 / Params['lambda_dg'] * (math.cos(Params['phi_dg']) * nest.spatial.pos.x +
math.sin(Params['phi_dg']) * nest.spatial.pos.y)
retina.phase = retina_phase
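# For the default phi_dg = 0 the expression above reduces to
# phase = 360 / lambda_dg * x, i.e. the phase advances by a full cycle over
# one spatial wavelength along the x-axis, which, together with the temporal
# frequency f_dg, produces the drifting-grating stimulus.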
##############################################################################
# Thalamus
# --------
#
# We first introduce specific neuron models for the thalamic relay
# cells and interneurons. These have identical properties, but by
# treating them as different populations, we can address them specifically
# when building connections.
for model_name in ('TpRelay', 'TpInter'):
nest.CopyModel('ThalamicNeuron', model_name)
# Now we can create the layers, one with relay cells,
# and one with interneurons:
TpRelay = nest.Create('TpRelay', positions=layerGrid)
TpInter = nest.Create('TpInter', positions=layerGrid)
##############################################################################
# Reticular nucleus
# -----------------
nest.CopyModel('ThalamicNeuron', 'RpNeuron')
Rp = nest.Create('RpNeuron', positions=layerGrid)
##############################################################################
# Primary visual cortex
# ---------------------
#
# We follow again the same approach as with Thalamus. We differentiate
# neuron types between layers and between pyramidal cells and
# interneurons. We have two layers of pyramidal cells and one layer of
# interneurons for each of layers 2-3, 4, and 5-6. Finally, we need to
# differentiate between vertically and horizontally tuned populations.
# When creating the populations, we create the vertically and the
# horizontally tuned populations as separate dictionaries holding the
# layers.
for layer in ('L23', 'L4', 'L56'):
nest.CopyModel('CtxExNeuron', layer + 'pyr')
for layer in ('L23', 'L4', 'L56'):
nest.CopyModel('CtxInNeuron', layer + 'in')
name_dict = {'L23pyr': 2, 'L23in': 1,
'L4pyr': 2, 'L4in': 1,
'L56pyr': 2, 'L56in': 1}
# Now we can create the populations, suffixes h and v indicate tuning
Vp_h_layers = {}
Vp_v_layers = {}
for layer_name, num_layers in name_dict.items():
for i in range(num_layers):
Vp_h_layers['{}_{}'.format(layer_name, i)] = nest.Create(layer_name, positions=layerGrid)
Vp_v_layers['{}_{}'.format(layer_name, i)] = nest.Create(layer_name, positions=layerGrid)
##############################################################################
# Collect all populations
# -----------------------
#
# For reference purposes, e.g., printing, we collect all populations
# in a tuple:
populations = (retina, TpRelay, TpInter, Rp) + tuple(Vp_h_layers.values()) + tuple(Vp_v_layers.values())
##############################################################################
# Inspection
# ----------
#
# We can now look at the network using `PrintNodes`:
nest.PrintNodes()
# We can also try to plot a single layer in a network. All layers have
# equal positions of the nodes.
nest.PlotLayer(Rp)
plt.title('Layer Rp')
##############################################################################
# Synapse models
# --------------
#
# Actual synapse dynamics, e.g., properties such as the synaptic time
# course, time constants, reversal potentials, are properties of
# neuron models in NEST and we set them in section `Neuron models`_
# above. When we refer to *synapse models* in NEST, we actually mean
# connectors which store information about connection weights and
# delays, as well as port numbers at the target neuron (``rport``)
# and implement synaptic plasticity. The latter two aspects are not
# relevant here.
#
# We just use NEST's ``static_synapse`` connector but copy it to
# synapse models ``AMPA`` and ``GABA_A`` for the sake of
# explicitness. Weights and delays are set as needed in section
# `Connections`_ below, as they are different from projection to
# projection. De facto, the sign of the synaptic weight decides
# whether input via a connection is handled by the ``_ex`` or the
# ``_in`` synapse.
nest.CopyModel('static_synapse', 'AMPA')
nest.CopyModel('static_synapse', 'GABA_A')
##############################################################################
# Connections
# --------------------
#
# Building connections is the most complex part of network
# construction. Connections are specified in Table 1 in the
# Hill-Tononi paper. As pointed out above, we only consider AMPA and
# GABA_A synapses here. Adding other synapses is tedious work, but
# should pose no new principal challenges. We also use a uniform
# instead of a Gaussian distribution for the weights.
#
# The model has two identical primary visual cortex populations,
# ``Vp_v`` and ``Vp_h``, tuned to vertical and horizonal gratings,
# respectively. The *only* difference in the connection patterns
# between the two populations is that the thalamocortical input to layers
# L4 and L5-6 comes from regions of 8x2 and 2x8 grid locations,
# respectively. Furthermore, inhibitory connections in cortex go to
# the opposing orientation population as well as to their own.
#
# To save us a lot of code doubling, we thus defined properties
# dictionaries for all connections first and then use this to connect
# both populations. We follow the subdivision of connections as in
# the Hill & Tononi paper.
#
# TODO: Rewrite this note.
# **Note:** Hill & Tononi state that their model spans 8 degrees of
# visual angle and stimuli are specified according to this. On the
# other hand, all connection patterns are defined in terms of cell
# grid positions. Since NEST defines connection patterns in terms
# of the extent given in degrees, we need to apply the following
# scaling factor to all lengths in connections:
dpc = Params['visSize'] / (Params['N'] - 1)
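# With the default parameters this gives dpc = 8.0 / 39, roughly 0.2 degrees
# per grid cell, so e.g. the 12-cell mask radius used below corresponds to
# about 2.5 degrees of visual angle (illustrative numbers only).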
# We will collect all same-orientation cortico-cortical connections in
ccConnections = []
# the cross-orientation cortico-cortical connections in
ccxConnections = []
# and all cortico-thalamic connections in
ctConnections = []
##############################################################################
# Horizontal intralaminar
# -----------------------
#
# *Note:* "Horizontal" means "within the same cortical layer" in this
# case.
#
# We first define dictionaries with the (most) common properties for
# horizontal intralaminar connections. We then create copies in which
# we adapt those values that need adapting, and then use these to
# create the connections.
horIntra_conn_spec = {"rule": "pairwise_bernoulli",
"mask": {"circular": {"radius": 12.0 * dpc}},
"p": 0.05*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
horIntra_syn_spec = {"synapse_model": "AMPA",
"weight": 1.0,
"delay": nest.random.uniform(min=1.75, max=2.25)}
# In a loop, we run over the sources and targets and the corresponding
# dictionaries with values that needs updating.
for conn in [{"sources": "L23pyr", "targets": "L23pyr", "conn_spec": {}},
{"sources": "L23pyr", "targets": "L23in", "conn_spec": {}},
{"sources": "L4pyr", "targets": "L4pyr", "conn_spec": {"mask": {"circular": {"radius": 7.0 * dpc}}}},
{"sources": "L4pyr", "targets": "L4in", "conn_spec": {"mask": {"circular": {"radius": 7.0 * dpc}}}},
{"sources": "L56pyr", "targets": "L56pyr", "conn_spec": {}},
{"sources": "L56pyr", "targets": "L56in", "conn_spec": {}}]:
conn_spec = horIntra_conn_spec.copy()
conn_spec.update(conn['conn_spec'])
ccConnections.append([conn['sources'], conn['targets'], conn_spec, horIntra_syn_spec])
##############################################################################
# Vertical intralaminar
# -----------------------
# *Note:* "Vertical" means "between cortical layers" in this
# case.
#
# We proceed as above.
verIntra_conn_spec = {"rule": "pairwise_bernoulli",
"mask": {"circular": {"radius": 2.0 * dpc}},
"p": nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
verIntra_syn_spec = {"synapse_model": "AMPA",
"weight": 2.0,
"delay": nest.random.uniform(min=1.75, max=2.25)}
for conn in [{"sources": "L23pyr", "targets": "L56pyr",
"syn_spec": {"weight": 1.0}},
{"sources": "L23pyr", "targets": "L23in",
"syn_spec": {"weight": 1.0}},
{"sources": "L4pyr", "targets": "L23pyr", "syn_spec": {}},
{"sources": "L4pyr", "targets": "L23in", "syn_spec": {}},
{"sources": "L56pyr", "targets": "L23pyr", "syn_spec": {}},
{"sources": "L56pyr", "targets": "L23in", "syn_spec": {}},
{"sources": "L56pyr", "targets": "L4pyr", "syn_spec": {}},
{"sources": "L56pyr", "targets": "L4in", "syn_spec": {}}]:
syn_spec = verIntra_syn_spec.copy()
syn_spec.update(conn['syn_spec'])
ccConnections.append([conn['sources'], conn['targets'], verIntra_conn_spec, syn_spec])
##############################################################################
# Intracortical inhibitory
# ------------------------
#
# We proceed as above, with the following difference: each connection
# is added to both the same-orientation and the cross-orientation list of
# connections.
#
# **Note:** Weights increased from -1.0 to -2.0, to make up for missing GabaB
#
# Note that we have to specify the **weight with negative sign** to make
# the connections inhibitory.
intraInh_conn_spec = {"rule": "pairwise_bernoulli",
"mask": {"circular": {"radius": 7.0 * dpc}},
"p": 0.25*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
intraInh_syn_spec = {"synapse_model": "GABA_A",
"weight": -2.0,
"delay": nest.random.uniform(min=1.75, max=2.25)}
for conn in [{"sources": "L23in", "targets": "L23pyr", "conn_spec": {}},
{"sources": "L23in", "targets": "L23in", "conn_spec": {}},
{"sources": "L4in", "targets": "L4pyr", "conn_spec": {}},
{"sources": "L4in", "targets": "L4in", "conn_spec": {}},
{"sources": "L56in", "targets": "L56pyr", "conn_spec": {}},
{"sources": "L56in", "targets": "L56in", "conn_spec": {}}]:
conn_spec = intraInh_conn_spec.copy()
conn_spec.update(conn['conn_spec'])
ccConnections.append([conn['sources'], conn['targets'], conn_spec, intraInh_syn_spec])
ccxConnections.append([conn['sources'], conn['targets'], conn_spec, intraInh_syn_spec])
##############################################################################
# Corticothalamic
# ---------------
# We proceed as above.
corThal_conn_spec = {"rule": "pairwise_bernoulli",
"mask": {"circular": {"radius": 5.0 * dpc}},
"p": 0.5*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5 * dpc)}
corThal_syn_spec = {"synapse_model": "AMPA",
"weight": 1.0,
"delay": nest.random.uniform(min=7.5, max=8.5)}
for conn in [{"sources": "L56pyr", "conn_spec": {}}]:
conn_spec = corThal_conn_spec.copy()
conn_spec.update(conn['conn_spec'])
syn_spec = corThal_syn_spec.copy()
ctConnections.append([conn['sources'], conn_spec, syn_spec])
##############################################################################
# Corticoreticular
# ----------------
#
# In this case, there is only a single connection, so we define the
# dictionaries directly; it is very similar to corThal, and to show that,
# we copy first, then update.
corRet = corThal_conn_spec.copy()
corRet_syn_spec = corThal_syn_spec.copy()
corRet_syn_spec.update({"weight": 2.5})
##############################################################################
# Build all connections beginning in cortex
# -----------------------------------------
#
# Cortico-cortical, same orientation
print("Connecting: cortico-cortical, same orientation")
for source, target, conn_spec, syn_spec in ccConnections:
for src_i in range(name_dict[source]):
for tgt_i in range(name_dict[target]):
source_name = '{}_{}'.format(source, src_i)
target_name = '{}_{}'.format(target, tgt_i)
nest.Connect(Vp_h_layers[source_name], Vp_h_layers[target_name], conn_spec, syn_spec)
nest.Connect(Vp_v_layers[source_name], Vp_v_layers[target_name], conn_spec, syn_spec)
# Cortico-cortical, cross-orientation
print("Connecting: cortico-cortical, other orientation")
for source, target, conn_spec, syn_spec in ccxConnections:
for src_i in range(name_dict[source]):
for tgt_i in range(name_dict[target]):
source_name = '{}_{}'.format(source, src_i)
target_name = '{}_{}'.format(target, tgt_i)
nest.Connect(Vp_h_layers[source_name], Vp_v_layers[target_name], conn_spec, syn_spec)
nest.Connect(Vp_v_layers[source_name], Vp_h_layers[target_name], conn_spec, syn_spec)
# Cortico-thalamic connections
print("Connecting: cortico-thalamic")
for source, conn_spec, syn_spec in ctConnections:
for src_i in range(name_dict[source]):
source_name = '{}_{}'.format(source, src_i)
nest.Connect(Vp_h_layers[source_name], TpRelay, conn_spec, syn_spec)
nest.Connect(Vp_h_layers[source_name], TpInter, conn_spec, syn_spec)
nest.Connect(Vp_v_layers[source_name], TpRelay, conn_spec, syn_spec)
nest.Connect(Vp_v_layers[source_name], TpInter, conn_spec, syn_spec)
for src_i in range(name_dict['L56pyr']):
source_name = 'L56pyr_{}'.format(src_i)
nest.Connect(Vp_h_layers[source_name], Rp, corRet, corRet_syn_spec)
nest.Connect(Vp_v_layers[source_name], Rp, corRet, corRet_syn_spec)
##############################################################################
# Thalamo-cortical connections
# ----------------------------
#
# **Note:** According to the text on p. 1674, bottom right, of the Hill &
# Tononi paper, thalamocortical connections are created by selecting from
# the thalamic population for each L4 pyramidal cell. We must therefore
# specify that we want to select from the source neurons.
#
# We first handle the rectangular thalamocortical connections.
thalCorRect_conn_spec = {"rule": "pairwise_bernoulli",
"use_on_source": True}
thalCorRect_syn_spec = {"synapse_model": "AMPA",
"weight": 5.0,
"delay": nest.random.uniform(min=2.75, max=3.25)}
print("Connecting: thalamo-cortical")
# Horizontally tuned
thalCorRect_conn_spec.update(
{"mask": {"rectangular": {"lower_left": [-4.0 * dpc, -1.0 * dpc],
"upper_right": [4.0 * dpc, 1.0 * dpc]}}})
for conn in [{"targets": "L4pyr", "conn_spec": {"p": 0.5}},
{"targets": "L56pyr", "conn_spec": {"p": 0.3}}]:
conn_spec = thalCorRect_conn_spec.copy()
conn_spec.update(conn['conn_spec'])
for trg_i in range(name_dict[conn['targets']]):
target_name = '{}_{}'.format(conn['targets'], trg_i)
nest.Connect(
TpRelay, Vp_h_layers[target_name], conn_spec, thalCorRect_syn_spec)
# Vertically tuned
thalCorRect_conn_spec.update(
{"mask": {"rectangular": {"lower_left": [-1.0 * dpc, -4.0 * dpc],
"upper_right": [1.0 * dpc, 4.0 * dpc]}}})
for conn in [{"targets": "L4pyr", "conn_spec": {"p": 0.5}},
{"targets": "L56pyr", "conn_spec": {"p": 0.3}}]:
conn_spec = thalCorRect_conn_spec.copy()
conn_spec.update(conn['conn_spec'])
for trg_i in range(name_dict[conn['targets']]):
target_name = '{}_{}'.format(conn['targets'], trg_i)
nest.Connect(
TpRelay, Vp_v_layers[target_name], conn_spec, thalCorRect_syn_spec)
# Diffuse connections
thalCorDiff_conn_spec = {"rule": "pairwise_bernoulli",
"use_on_source": True,
"mask": {"circular": {"radius": 5.0 * dpc}},
"p": 0.1*nest.spatial_distributions.gaussian(nest.spatial.distance, std=7.5*dpc)}
thalCorDiff_syn_spec = {"synapse_model": "AMPA",
"weight": 5.0,
"delay": nest.random.uniform(min=2.75, max=3.25)}
for conn in [{"targets": "L4pyr"},
{"targets": "L56pyr"}]:
for trg_i in range(name_dict[conn['targets']]):
target_name = '{}_{}'.format(conn['targets'], trg_i)
nest.Connect(TpRelay, Vp_h_layers[target_name], thalCorDiff_conn_spec, thalCorDiff_syn_spec)
nest.Connect(TpRelay, Vp_v_layers[target_name], thalCorDiff_conn_spec, thalCorDiff_syn_spec)
##############################################################################
# Thalamic connections
# --------------------
#
# Connections inside thalamus, including Rp.
#
# *Note:* In Hill & Tononi, the inhibition between Rp cells is mediated by
# GABA_B receptors. We use GABA_A receptors here to provide some
# self-dampening of Rp.
#
# **Note 1:** The following code had a serious bug in v. 0.1: During the first
# iteration of the loop, "synapse_model" and "weights" were set to "AMPA" and
# "0.1", respectively and remained unchanged, so that all connections were
# created as excitatory connections, even though they should have been
# inhibitory. We now specify synapse_model and weight explicitly for each
# connection to avoid this.
#
# **Note 2:** The following code also had a serious bug in v. 0.4: In the
# loop the connection dictionary would be updated directly, i.e. without
# making a copy. This lead to the entry ``'sources': 'TpInter'`` being
# left in the dictionary when connecting with ``Rp`` sources. Therefore no
# connections for the connections with ``Rp`` as source would be created
# here.
thal_conn_spec = {"rule": "pairwise_bernoulli"}
thal_syn_spec = {"delay": nest.random.uniform(min=1.75, max=2.25)}
print("Connecting: intra-thalamic")
for src, tgt, conn, syn in [(TpRelay, Rp,
{"mask": {"circular": {"radius": 2.0 * dpc}},
"p": nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "AMPA",
"weight": 2.0}),
(TpInter, TpRelay,
{"mask": {"circular": {"radius": 2.0 * dpc}},
"p": 0.25*nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "GABA_A",
"weight": -1.0}),
(TpInter, TpInter,
{"mask": {"circular": {"radius": 2.0 * dpc}},
"p": 0.25*nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "GABA_A", "weight": -1.0}),
(Rp, TpRelay, {"mask": {"circular": {"radius": 12.0 * dpc}},
"p": 0.15*nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "GABA_A", "weight": -1.0}),
(Rp, TpInter, {"mask": {"circular": {"radius": 12.0 * dpc}},
"p": 0.15*nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "GABA_A", "weight": -1.0}),
(Rp, Rp, {"mask": {"circular": {"radius": 12.0 * dpc}},
"p": 0.5*nest.spatial_distributions.gaussian(
nest.spatial.distance, std=7.5*dpc)},
{"synapse_model": "GABA_A", "weight": -1.0})
]:
conn_spec = thal_conn_spec.copy()
conn_spec.update(conn)
syn_spec = thal_syn_spec.copy()
syn_spec.update(syn)
nest.Connect(src, tgt, conn_spec, syn_spec)
##############################################################################
# Thalamic input
# --------------
#
# Input to the thalamus from the retina.
#
# **Note:** Hill & Tononi specify a delay of 0 ms for this connection.
# We use 1 ms here.
retThal_conn_spec = {"rule": "pairwise_bernoulli",
"mask": {"circular": {"radius": 1.0 * dpc}},
"p": 0.75*nest.spatial_distributions.gaussian(nest.spatial.distance, std=2.5*dpc)}
retThal_syn_spec = {"weight": 10.0,
"delay": 1.0,
"synapse_model": "AMPA"}
print("Connecting: retino-thalamic")
nest.Connect(retina, TpRelay, retThal_conn_spec, retThal_syn_spec)
nest.Connect(retina, TpInter, retThal_conn_spec, retThal_syn_spec)
##############################################################################
# Checks on connections
# ---------------------
#
# As a very simple check on the connections created, we inspect
# the connections from the central node of various layers.
# Connections from Retina to TpRelay
retina_ctr_node_id = nest.FindCenterElement(retina)
retina_ctr_index = retina.index(retina_ctr_node_id.global_id)
conns = nest.GetConnections(retina[retina_ctr_index], TpRelay)
nest.PlotTargets(retina[retina_ctr_index], TpRelay, 'AMPA')
plt.title('Connections Retina -> TpRelay')
# Connections from TpRelay to L4pyr in Vp (horizontally tuned)
TpRelay_ctr_node_id = nest.FindCenterElement(TpRelay)
TpRelay_ctr_index = TpRelay.index(TpRelay_ctr_node_id.global_id)
nest.PlotTargets(TpRelay[TpRelay_ctr_index], Vp_h_layers['L4pyr_0'], 'AMPA')
plt.title('Connections TpRelay -> Vp(h) L4pyr')
# Connections from TpRelay to L4pyr in Vp (vertically tuned)
nest.PlotTargets(TpRelay[TpRelay_ctr_index], Vp_v_layers['L4pyr_0'], 'AMPA')
plt.title('Connections TpRelay -> Vp(v) L4pyr')
# Block until the figures are closed before we continue.
plt.show(block=True)
##############################################################################
# Recording devices
# -----------------
#
# This recording device setup is a bit makeshift. For each population
# we want to record from, we create one ``multimeter`` and connect it to
# all nodes of that population. ``loc`` is the subplot location for the layer.
print("Connecting: Recording devices")
recorders = {}
for name, loc, population in [('TpRelay', 0, TpRelay),
('Rp', 1, Rp),
('Vp_v L4pyr 1', 2, Vp_v_layers['L4pyr_0']),
('Vp_v L4pyr 2', 3, Vp_v_layers['L4pyr_1']),
('Vp_h L4pyr 1', 4, Vp_h_layers['L4pyr_0']),
('Vp_h L4pyr 2', 5, Vp_h_layers['L4pyr_1'])]:
recorders[name] = (nest.Create('RecordingNode'), loc)
# one recorder to all targets
nest.Connect(recorders[name][0], population)
##############################################################################
# Example simulation
# --------------------
#
# This simulation is set up to create a step-wise visualization of
# the membrane potential. To do so, we simulate ``sim_interval``
# milliseconds at a time, then read out data from the multimeters,
# clear data from the multimeters and plot the data as pseudocolor
# plots.
# show time during simulation
nest.SetKernelStatus({'print_time': True})
# lower and upper limits for color scale, for each of the
# populations recorded.
vmn = [-80, -80, -80, -80, -80, -80]
vmx = [-50, -50, -50, -50, -50, -50]
nest.Simulate(Params['sim_interval'])
# Set up the figure, assume six recorders.
fig, axes = plt.subplots(2, 3)
images = []
for i, ax in enumerate(axes.flat):
# We initialize with an empty image
images.append(ax.imshow([[0.]], aspect='equal', interpolation='nearest',
extent=(0, Params['N'] + 1, 0, Params['N'] + 1),
vmin=vmn[i], vmax=vmx[i], cmap='plasma'))
fig.colorbar(images[-1], ax=ax)
# loop over simulation intervals
for t in np.arange(0, Params['simtime'], Params['sim_interval']):
# do the simulation
nest.Simulate(Params['sim_interval'])
# now plot data from each recorder in turn
for name, rec_item in recorders.items():
recorder, subplot_pos = rec_item
ax = axes.flat[subplot_pos]
im = images[subplot_pos]
d = recorder.get('events', 'V_m')
# clear data from multimeter
recorder.n_events = 0
# update image data and title
im.set_data(np.reshape(d, (Params['N'], Params['N'])))
ax.set_title(name + ', t = %6.1f ms' % nest.GetKernelStatus()['time'])
# We need to pause because drawing of the figure happens while the main code is sleeping
plt.pause(0.0001)
# just for some information at the end
pprint(nest.GetKernelStatus())
| gpl-2.0 |
proyan/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/identify_motor_low_level.py | 1 | 3988 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from dynamic_graph.sot.torque_control.hrp2.control_manager_conf import IN_OUT_GAIN
def identify_motor_low_level(dq, ctrl, current):
DZ = 0.4
K3 = 1.0#1.06
mask=abs(K3*ctrl/IN_OUT_GAIN-current)<DZ
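# The mask keeps only the samples where the control signal scaled by K3 and
# the measured current agree to within the assumed dead zone DZ; these are
# the samples used for the correlation-based identification below.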
times = np.arange(len(dq))*0.001
plt.figure()
plt.subplot(411)
plt.plot(times,current ,'.')
plt.plot(times[mask],current[mask],'.')
plt.title('current')
plt.subplot(412)
plt.plot(times,dq ,'.')
plt.plot(times[mask],dq[mask],'.')
plt.title('dq')
plt.subplot(413)
plt.plot(times,ctrl ,'.')
plt.plot(times[mask],ctrl[mask],'.')
plt.title('ctrl')
plt.subplot(414)
plt.plot(times,K3*ctrl/IN_OUT_GAIN-current ,'.')
plt.plot(times[mask],K3*ctrl[mask]/IN_OUT_GAIN-current[mask],'.')
plt.title('ctrl-current ')
plt.show()
#~ embed()
fig = plt.figure()
ax = fig.gca(projection='3d')
NDZ= 100
NK3= 100
lDZs = np.linspace(0.3,0.5,NDZ)
lK3s = np.linspace(0.95,1.2,NK3)
DZs, K3s = np.meshgrid(lDZs, lK3s)
cost=np.zeros([NDZ,NK3])
for i in range(NDZ):
# print str(int(1000*i/NDZ)/10.0) + '%'
for j in range(NK3):
#~ print (i,j)
#~ if i == 50: embed()
DZ=lDZs[i]
K3=lK3s[j]
mask=abs(K3*ctrl/IN_OUT_GAIN-current) < DZ
# not_mask = np.logical_not(mask)
#plt.plot(current[mask],dq[mask],'.')
cost[i,j]=-np.corrcoef(current[mask],dq[mask])[0,1] #* np.sum(not_mask) / (np.sum(mask) + np.sum(not_mask))
#~ embed()
#~ cost[i,j]+=np.corrcoef(ctrl[not_mask],dq[not_mask])[0,1] * np.sum(mask) / (np.sum(mask) + np.sum(not_mask))
#~ cost[i,j]+=np.corrcoef(ctrl[not_mask],current[not_mask])[0,1] * np.sum(mask) / (np.sum(mask) + np.sum(not_mask))
#~ cost[i,j]=cost[i,j]/3.0
#~ ax.scatter(DZ, K3, cost,cmap=cm.coolwarm)
surf = ax.plot_surface(DZs, K3s, cost.T, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(np.min(cost), 1.01)
ax.zaxis.set_major_locator(LinearLocator(2))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
#plot a particular case
DZ=0.4
K3=1.0
mask=abs(K3*ctrl/IN_OUT_GAIN-current) < DZ
plt.xlabel('current')
plt.ylabel('dq')
plt.plot(current[mask],dq[mask],'.')
plt.show()
#plot the optimum
iDZ,iK3 = np.unravel_index(np.argmax(cost),cost.shape)
DZ=lDZs[iDZ]
K3=lK3s[iK3]
print 'DZ = ' + str(DZ)
print 'K3 = ' + str(K3)
mask=abs(K3*ctrl/IN_OUT_GAIN-current) < DZ
plt.xlabel('current')
plt.ylabel('dq')
plt.plot(current[mask],dq[mask],'.')
print -np.corrcoef(current[mask],dq[mask])[0,1]
print cost[iDZ,iK3]
plt.show()
#~ maskInDZ=abs(ctrl/IN_OUT_GAIN) < 0.5
#~ tt=np.arange(maskInDZ.size)
#~ plt.plot(tt,ctrl/IN_OUT_GAIN,'.')
#~ plt.plot(tt,current,'.')
#~ plt.plot(tt[maskInDZ],ctrl[maskInDZ]/IN_OUT_GAIN,'.')
#~ (v1min,v1max) = ( 0.7 , 0.9)
#~ (v2min,v2max) = (0.25 , 0.3)
#~ (v3min,v3max) = (0.18 , 0.21)
#~
#~ maskv1= np.logical_and( dq > v1min , dq <v1max )
#~ maskv2= np.logical_and( dq > v2min , dq <v2max )
#~ maskv3= np.logical_and( dq > v3min , dq <v3max )
#~
#~ plt.plot(ctrl[maskv1] /IN_OUT_GAIN,current[maskv1],'xr')
#~ plt.plot(ctrl[maskv2] /IN_OUT_GAIN,current[maskv2],'xg')
#~ plt.plot(ctrl[maskv3] /IN_OUT_GAIN,current[maskv3],'xy')
#~ plt.show()
| gpl-3.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/test_testing.py | 2 | 26599 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import unittest
import nose
import numpy as np
import sys
from pandas import Series, DataFrame
import pandas.util.testing as tm
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
raise_with_traceback, assert_index_equal,
assert_series_equal, assert_frame_equal,
assert_numpy_array_equal,
RNGContext, assertRaises,
skip_if_no_package_deco)
from pandas.compat import is_platform_windows
# let's get meta.
class TestAssertAlmostEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_almost_equal_both(self, a, b, **kwargs):
assert_almost_equal(a, b, **kwargs)
assert_almost_equal(b, a, **kwargs)
def _assert_not_almost_equal_both(self, a, b, **kwargs):
self.assertRaises(AssertionError, assert_almost_equal, a, b, **kwargs)
self.assertRaises(AssertionError, assert_almost_equal, b, a, **kwargs)
def test_assert_almost_equal_numbers(self):
self._assert_almost_equal_both(1.1, 1.1)
self._assert_almost_equal_both(1.1, 1.100001)
self._assert_almost_equal_both(np.int16(1), 1.000001)
self._assert_almost_equal_both(np.float64(1.1), 1.1)
self._assert_almost_equal_both(np.uint32(5), 5)
self._assert_not_almost_equal_both(1.1, 1)
self._assert_not_almost_equal_both(1.1, True)
self._assert_not_almost_equal_both(1, 2)
self._assert_not_almost_equal_both(1.0001, np.int16(1))
def test_assert_almost_equal_numbers_with_zeros(self):
self._assert_almost_equal_both(0, 0)
self._assert_almost_equal_both(0.000001, 0)
self._assert_not_almost_equal_both(0.001, 0)
self._assert_not_almost_equal_both(1, 0)
def test_assert_almost_equal_numbers_with_mixed(self):
self._assert_not_almost_equal_both(1, 'abc')
self._assert_not_almost_equal_both(1, [1, ])
self._assert_not_almost_equal_both(1, object())
def test_assert_almost_equal_edge_case_ndarrays(self):
self._assert_almost_equal_both(np.array([], dtype='M8[ns]'),
np.array([], dtype='float64'))
self._assert_almost_equal_both(np.array([], dtype=str),
np.array([], dtype='int64'))
def test_assert_almost_equal_dicts(self):
self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
self._assert_not_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
self._assert_not_almost_equal_both(
{'a': 1, 'b': 2}, {'a': 1, 'b': 2, 'c': 3}
)
self._assert_not_almost_equal_both({'a': 1}, 1)
self._assert_not_almost_equal_both({'a': 1}, 'abc')
self._assert_not_almost_equal_both({'a': 1}, [1, ])
def test_assert_almost_equal_dict_like_object(self):
class DictLikeObj(object):
def keys(self):
return ('a', )
def __getitem__(self, item):
if item == 'a':
return 1
self._assert_almost_equal_both({'a': 1}, DictLikeObj())
self._assert_not_almost_equal_both({'a': 2}, DictLikeObj())
def test_assert_almost_equal_strings(self):
self._assert_almost_equal_both('abc', 'abc')
self._assert_not_almost_equal_both('abc', 'abcd')
self._assert_not_almost_equal_both('abc', 'abd')
self._assert_not_almost_equal_both('abc', 1)
self._assert_not_almost_equal_both('abc', [1, ])
def test_assert_almost_equal_iterables(self):
self._assert_almost_equal_both([1, 2, 3], [1, 2, 3])
self._assert_almost_equal_both(np.array([1, 2, 3]), [1, 2, 3])
# Can't compare generators
self._assert_not_almost_equal_both(iter([1, 2, 3]), [1, 2, 3])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 4])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 3, 4])
self._assert_not_almost_equal_both([1, 2, 3], 1)
def test_assert_almost_equal_null(self):
self._assert_almost_equal_both(None, None)
self._assert_almost_equal_both(None, np.NaN)
self._assert_not_almost_equal_both(None, 0)
self._assert_not_almost_equal_both(np.NaN, 0)
def test_assert_almost_equal_inf(self):
self._assert_almost_equal_both(np.inf, np.inf)
self._assert_almost_equal_both(np.inf, float("inf"))
self._assert_not_almost_equal_both(np.inf, 0)
def test_assert_almost_equal_pandas(self):
self.assert_almost_equal(pd.Index([1., 1.1]),
pd.Index([1., 1.100001]))
self.assert_almost_equal(pd.Series([1., 1.1]),
pd.Series([1., 1.100001]))
self.assert_almost_equal(pd.DataFrame({'a': [1., 1.1]}),
pd.DataFrame({'a': [1., 1.100001]}))
class TestUtilTesting(tm.TestCase):
_multiprocess_can_split_ = True
def test_raise_with_traceback(self):
with assertRaisesRegexp(LookupError, "error_text"):
try:
raise ValueError("THIS IS AN ERROR")
except ValueError as e:
e = LookupError("error_text")
raise_with_traceback(e)
with assertRaisesRegexp(LookupError, "error_text"):
try:
raise ValueError("This is another error")
except ValueError:
e = LookupError("error_text")
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
class TestAssertNumpyArrayEqual(tm.TestCase):
def test_numpy_array_equal_message(self):
if is_platform_windows():
raise nose.SkipTest("windows has incomparable line-endings "
"and uses L on the shape")
expected = """numpy array are different
numpy array shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]))
# scalar comparison
expected = """: 1 != 2"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(1, 2)
expected = """expected 2\\.00000 but got 1\\.00000, with decimal 5"""
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(1, 2)
# array / scalar array comparison
expected = """(numpy array|Iterable) are different
First object is iterable, second isn't
\\[left\\]: \\[1\\]
\\[right\\]: 1"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([1]), 1)
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([1]), 1)
# scalar / array comparison
expected = """(numpy array|Iterable) are different
Second object is iterable, first isn't
\\[left\\]: 1
\\[right\\]: \\[1\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(1, np.array([1]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(1, np.array([1]))
expected = """numpy array are different
numpy array values are different \\(66\\.66667 %\\)
\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(
np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(
np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([1, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1\\.1, 2\\.000001\\]
\\[right\\]: \\[1\\.1, 2.0\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(
np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
# must pass
assert_almost_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
expected = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
expected = """numpy array are different
numpy array values are different \\(25\\.0 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
# allow to overwrite message
expected = """Index are different
Index shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
def test_assert_almost_equal_iterable_message(self):
expected = """Iterable are different
Iterable length are different
\\[left\\]: 2
\\[right\\]: 3"""
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal([1, 2], [3, 4, 5])
expected = """Iterable are different
Iterable values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_almost_equal([1, 2], [1, 3])
class TestAssertIndexEqual(unittest.TestCase):
_multiprocess_can_split_ = True
def test_index_equal_message(self):
expected = """Index are different
Index levels are different
\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\],
labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4
)])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=False)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2), ('B', 3), ('B', 4
)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4
)])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index length are different
\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3, 4])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index classes are different
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3.0])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0000000001])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
# must succeed
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0001])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
# must succeed
assert_index_equal(idx1, idx2, check_exact=False,
check_less_precise=True)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 4])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, check_less_precise=True)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2), ('B', 3), ('B', 4
)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2), ('B', 3), ('B', 4
)])
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
def test_index_equal_metadata_message(self):
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[None\\]
\\[right\\]: \\[u?'x'\\]"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3], name='x')
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
# same name, should pass
assert_index_equal(pd.Index([1, 2, 3], name=np.nan),
pd.Index([1, 2, 3], name=np.nan))
assert_index_equal(pd.Index([1, 2, 3], name=pd.NaT),
pd.Index([1, 2, 3], name=pd.NaT))
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[nan\\]
\\[right\\]: \\[NaT\\]"""
idx1 = pd.Index([1, 2, 3], name=np.nan)
idx2 = pd.Index([1, 2, 3], name=pd.NaT)
with assertRaisesRegexp(AssertionError, expected):
assert_index_equal(idx1, idx2)
class TestAssertSeriesEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_equal(self, x, y, **kwargs):
assert_series_equal(x, y, **kwargs)
assert_series_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
self.assertRaises(AssertionError, assert_series_equal, a, b, **kwargs)
self.assertRaises(AssertionError, assert_series_equal, b, a, **kwargs)
def test_equal(self):
self._assert_equal(Series(range(3)), Series(range(3)))
self._assert_equal(Series(list('abc')), Series(list('abc')))
def test_not_equal(self):
self._assert_not_equal(Series(range(3)), Series(range(3)) + 1)
self._assert_not_equal(Series(list('abc')), Series(list('xyz')))
self._assert_not_equal(Series(range(3)), Series(range(4)))
self._assert_not_equal(
Series(range(3)), Series(
range(3), dtype='float64'))
self._assert_not_equal(
Series(range(3)), Series(
range(3), index=[1, 2, 4]))
# ATM meta data is not checked in assert_series_equal
# self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True)
def test_less_precise(self):
s1 = Series([0.12345], dtype='float64')
s2 = Series([0.12346], dtype='float64')
self.assertRaises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
s1 = Series([0.12345], dtype='float32')
s2 = Series([0.12346], dtype='float32')
self.assertRaises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
# even less than less precise
s1 = Series([0.1235], dtype='float32')
s2 = Series([0.1236], dtype='float32')
self.assertRaises(AssertionError, assert_series_equal, s1, s2)
self.assertRaises(AssertionError, assert_series_equal, s1, s2, True)
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_series_equal_message(self):
expected = """Series are different
Series length are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))
expected = """Series are different
Series values are different \\(33\\.33333 %\\)
\\[left\\]: \\[1, 2, 3\\]
\\[right\\]: \\[1, 2, 4\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))
with assertRaisesRegexp(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),
check_less_precise=True)
class TestAssertFrameEqual(tm.TestCase):
_multiprocess_can_split_ = True
def _assert_equal(self, x, y, **kwargs):
assert_frame_equal(x, y, **kwargs)
assert_frame_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
self.assertRaises(AssertionError, assert_frame_equal, a, b, **kwargs)
self.assertRaises(AssertionError, assert_frame_equal, b, a, **kwargs)
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_empty_dtypes(self):
df1 = pd.DataFrame(columns=["col1", "col2"])
df1["col1"] = df1["col1"].astype('int64')
df2 = pd.DataFrame(columns=["col1", "col2"])
self._assert_equal(df1, df2, check_dtype=False)
self._assert_not_equal(df1, df2, check_dtype=True)
def test_frame_equal_message(self):
expected = """DataFrame are different
DataFrame shape \\(number of rows\\) are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3]}),
pd.DataFrame({'A': [1, 2, 3, 4]}))
expected = """DataFrame are different
DataFrame shape \\(number of columns\\) are different
\\[left\\]: 2, Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: 1, Index\\(\\[u?'A'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3]}))
expected = """DataFrame\\.index are different
DataFrame\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b', u?'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'a', u?'b', u?'d'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'd']))
expected = """DataFrame\\.columns are different
DataFrame\\.columns values are different \\(50\\.0 %\\)
\\[left\\]: Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'A', u?'b'\\], dtype='object'\\)"""
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'b': [4, 5, 6]},
index=['a', 'b', 'c']))
expected = """DataFrame\\.iloc\\[:, 1\\] are different
DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\)
\\[left\\]: \\[4, 5, 6\\]
\\[right\\]: \\[4, 5, 7\\]"""
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}))
with assertRaisesRegexp(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}),
by_blocks=True)
class TestRNGContext(unittest.TestCase):
def test_RNGContext(self):
expected0 = 1.764052345967664
expected1 = 1.6243453636632417
with RNGContext(0):
with RNGContext(1):
self.assertEqual(np.random.randn(), expected1)
self.assertEqual(np.random.randn(), expected0)
class TestDeprecatedTests(tm.TestCase):
def test_warning(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertEquals(1, 1)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertNotEquals(1, 2)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assert_(True)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertAlmostEquals(1.0, 1.0000000001)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertNotAlmostEquals(1, 2)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_isinstance(Series([1, 2]), Series, msg='xxx')
class TestLocale(tm.TestCase):
def test_locale(self):
if sys.platform == 'win32':
raise nose.SkipTest(
"skipping on win platforms as locale not available")
# GH9744
locales = tm.get_locales()
self.assertTrue(len(locales) >= 1)
def test_skiptest_deco():
from nose import SkipTest
@skip_if_no_package_deco("fakepackagename")
def f():
pass
with assertRaises(SkipTest):
f()
@skip_if_no_package_deco("numpy")
def f():
pass
# hack to ensure that SkipTest is *not* raised
with assertRaises(ValueError):
f()
raise ValueError
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
jamesp/shallowwater | beta_plane/geostrophic_adjustment.py | 1 | 1417 | import numpy as np
import matplotlib.pyplot as plt
from shallowwater import LinearShallowWater, PeriodicBoundaries
class ShallowWater(LinearShallowWater, PeriodicBoundaries): pass
nx = 128
ny = 129
Lx = 1.5e7
Ly = 1.5e7
f0 = 4e-5
beta = 0.0
phi0 = 100.0
g = 10.0
c = 30.0 # Kelvin wave speed c = sqrt(gH)
H = c**2 / g # Set the height to get right speed waves
nu = 1.0e3
dx = float(Lx) / nx
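# Time step: a conservative fraction of the gravity-wave CFL limit dx/c
# (dt = 0.2*dx/c here), so the explicit scheme stays stable.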
dt = 0.8 * dx / (c*4)
import contextlib
plt.ion()
@contextlib.contextmanager
def iplot(fignum=1):
plt.figure(fignum)
plt.clf()
yield
plt.pause(0.001)
plt.draw()
sw = ShallowWater(nx, ny, Lx, Ly, beta=beta, f0=f0, g=g, H=H, nu=nu, dt=dt)
d = 20
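# Initial condition: a smooth sin^2 height bump of half-width d grid points,
# placed near the domain centre; the flow then adjusts geostrophically around it.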
hump = (np.sin(np.arange(0, np.pi, np.pi/(2*d)))**2)[np.newaxis, :] * (np.sin(np.arange(0, np.pi, np.pi/(2*d)))**2)[:, np.newaxis]
sw.h[nx//2:nx//2+2*d, ny//2-d:ny//2+d] = hump*1.0
plt.figure(1, figsize=(8,8))
for i in range(10000):
sw.step()
if i % 10 == 0:
with iplot(1):
plt.subplot(221)
plt.imshow(sw.h.T, cmap=plt.cm.RdBu_r)
plt.clim(-.5, .5)
plt.title('h')
plt.subplot(223)
div = sw.divergence()
maxdiv = np.max(np.abs(div))
plt.imshow(div.T, cmap=plt.cm.RdBu_r)
plt.clim(-maxdiv, maxdiv)
plt.title('divergence')
plt.subplot(224)
plt.plot(sw.H+sw.h[:, ny//2])
| mit |
ville-k/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 47 | 5877 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
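# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It assumes the
# deprecated tf.contrib.learn Estimator interface, where `fit` accepts an
# `input_fn`; the synthetic data, cluster count and step count below are
# illustrative choices only, not values prescribed by this module.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
  import tensorflow as tf

  def _synthetic_input_fn():
    # 200 two-dimensional points drawn around two well-separated centers.
    rng = np.random.RandomState(0)
    points = np.concatenate([rng.randn(100, 2) + 5.0,
                             rng.randn(100, 2) - 5.0]).astype(np.float32)
    return tf.constant(points), None

  gmm = GMM(num_clusters=2, random_seed=0)
  gmm.fit(input_fn=_synthetic_input_fn, steps=10)
  print(gmm.clusters())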
| apache-2.0 |
jeffakolb/Data-Science-45min-Intros | Bokeh/bubble_plot.py | 7 | 6845 | from collections import OrderedDict
import pandas as pd
import numpy as np
from jinja2 import Template
from bokeh.embed import components
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d,
LinearAxis, HoverTool, Text,
SingleIntervalTicker, Slider, Callback
)
from bokeh.palettes import Spectral6
from bokeh.plotting import vplot, hplot
from bokeh.resources import INLINE, Resources
from bokeh.templates import RESOURCES
def _get_data():
# Get the data
fertility_df = pd.read_csv('assets/fertility.csv', index_col='Country')
life_expectancy_df = pd.read_csv('assets/life_expectancy.csv', index_col='Country')
population_df = pd.read_csv('assets/population.csv', index_col='Country')
regions_df = pd.read_csv('assets/regions.csv', index_col='Country')
columns = list(fertility_df.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility_df = fertility_df.rename(columns=rename_dict)
life_expectancy_df = life_expectancy_df.rename(columns=rename_dict)
population_df = population_df.rename(columns=rename_dict)
regions_df = regions_df.rename(columns=rename_dict)
scale_factor = 200
population_df_size = np.sqrt(population_df / np.pi) / scale_factor
min_size = 3
population_df_size = population_df_size.where(population_df_size >= min_size).fillna(min_size)
regions_df.Group = regions_df.Group.astype('category')
regions = list(regions_df.Group.cat.categories)
def get_color(r):
return Spectral6[regions.index(r.Group)]
regions_df['region_color'] = regions_df.apply(get_color, axis=1)
return (years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df)
def _get_plot():
years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df = _get_data()
# Set-up the sources
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
# Build the plot
# Set up the plot
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# Add the year in background (add before circle)
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
text_x = 7
text_y = 95
for i, region in enumerate(regions):
plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
text_y = text_y - 5
# Add the slider
code = """
var year = slider.get('value'),
sources = %s,
new_source_data = sources[year].get('data');
renderer_source.set('data', new_source_data);
renderer_source.trigger('change');
text_source.set('data', {'year': [String(year)]});
text_source.trigger('change');
""" % js_source_array
callback = Callback(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback)
callback.args["slider"] = slider
callback.args["renderer_source"] = renderer_source
callback.args["text_source"] = text_source
# Lay it out
return vplot(plot, hplot(slider))
def get_bubble_html(plot=None):
if plot:
layout = plot
else:
layout = _get_plot()
with open('assets/bubble_template.html', 'r') as f:
template = Template(f.read())
resources = Resources(mode='server', root_url='/tree/')
bokeh_js = RESOURCES.render(js_files=resources.js_files)
script, div = components(layout)
html = template.render(
title="Bokeh - Gapminder demo",
bokeh_js=bokeh_js,
plot_script=script,
plot_div=div,
)
return html
def get_1964_data():
years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df = _get_data()
year = 1964
region_color = regions_df['region_color']
region_color.name = 'region_color'
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
return new_df
def get_scatter_data():
years, regions, fertility_df, life_expectancy_df, population_df_size, regions_df = _get_data()
xyvalues = OrderedDict()
xyvalues['1964'] = list(
zip(
fertility_df[1964].dropna().values,
life_expectancy_df[1964].dropna().values
)
)
return xyvalues
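# Typical use (an assumption, not shown in this module): render the page with
# get_bubble_html(), or pull get_1964_data()/get_scatter_data() into a static
# chart for a single year.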
| unlicense |
Vimos/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
frank-tancf/scikit-learn | sklearn/utils/fixes.py | 7 | 13511 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
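# Sanity note on the fallback above: the tanh formulation gives, e.g.,
# expit(0.0) == 0.5 and expit(709.0) == 1.0 without overflow warnings,
# matching scipy.special.expit where that import succeeds.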
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
| bsd-3-clause |
INM-6/nest-git-migration | testsuite/manualtests/test_tsodyks_depr_fac.py | 13 | 1136 | # -*- coding: utf-8 -*-
#
# test_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
    vm = loadtxt('voltmeter-0-0-4.dat')  # pylab/numpy loadtxt; pylab's old load() has been removed
figure(1)
clf()
plot(vm[:,0], vm[:,1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
| gpl-2.0 |
iismd17/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/stats/tests/test_morestats.py | 17 | 54641 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A, crit, sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = stats.binom_test(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = stats.binom_test([682, 243], p=3.0/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
def test_wilcoxon_arg_type():
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt")
_ = stats.wilcoxon(arr, zero_method="zsplit")
_ = stats.wilcoxon(arr, zero_method="wilcox")
class TestKstat(TestCase):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
        assert_equal(stats.kstatvar(data), np.nan)
def test_bad_arg(self):
        # Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
if __name__ == "__main__":
run_module_suite()
| mit |
wanggang3333/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
juliusbierk/scikit-image | doc/examples/plot_gabor.py | 11 | 4450 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system.
The images are filtered using the real parts of various Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
| bsd-3-clause |
zploskey/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert time from ns to ms and energy to Joules for plotting
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
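# Quick illustrative sanity check, not part of the original script: judging
# from the plotting code above, energies appear to be in microjoules and times
# in nanoseconds, so uJ / ((te - ts) / 1000.0 -> microseconds) yields Watts:
#
#   hb_energy_times_to_power(np.array([0.0]), np.array([2000000.0]),
#                            np.array([0.0]), np.array([1000000000.0]))
#   # -> array([ 2.])  (2 J delivered over 1 s is 2 W)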
def plot_all_time_series(config_list, output_dir):
    """Plot the time series of profiler activity and power for each trial in each configuration.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
    Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to search - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
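    # Illustrative invocations (the directory names are just the defaults
    # above; the flags are defined by the argparse configuration below):
    #   python process_logs.py -d heartbeat_logs -o plots
    #   python process_logs.py -d heartbeat_logs -o plots --android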
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
scienceopen/gaussfitter | ah_bootstrap.py | 31 | 36162 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and the options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
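A minimal sketch of such a ``setup.cfg`` section (the values shown here are
illustrative assumptions rather than defaults of any particular project; the
recognized option names are those listed in ``CFG_OPTIONS`` below)::

    [ah_bootstrap]
    auto_use = True
    path = astropy_helpers
    auto_upgrade = False
    offline = False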
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason*
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
        # of the same name then we will break that. However there's a catch-22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activating the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just active the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the imported Distribution on success, or None otherwise.
"""
        # Returns a Distribution if one could be found at the path (generating
        # egg_info first if necessary), or None on failure.
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
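        # Illustrative example (not from the original source): assuming
        # DIST_NAME == 'astropy-helpers' and an installed version 1.0.2,
        # _next_version() gives '1.1.0', so the requirement below becomes
        # 'astropy-helpers>1.0.2,<1.1.0', i.e. any newer 1.0.x bugfix release.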
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
        ``_check_submodule_no_git`` fallback uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
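        # As a rough illustration (hypothetical values, not from the original
        # source), lines of `git submodule status` output look like:
        #   -<40-char sha1> astropy_helpers            (uninitialized)
        #    <40-char sha1> astropy_helpers (v1.0)     (initialized, clean)
        # The leading status character is what the regex below captures.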
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[+\-U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
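        # For illustration only (hypothetical contents, not from the original
        # source), a .gitmodules file that this reader handles looks like:
        #   [submodule "astropy_helpers"]
        #       path = astropy_helpers
        #       url = https://github.com/astropy/astropy-helpers.git
        # Leading whitespace and '#'/';' comment lines are stripped below so
        # that RawConfigParser can parse the result.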
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
            raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
            raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
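        # Older setuptools returned parsed versions as tuples of zero-padded
        # strings, e.g. parse_version('1.2.3') ->
        # ('00000001', '00000002', '00000003', '*final'); the loop below keeps
        # the numeric parts and stops at the first '*'-prefixed marker.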
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
# In Python 2.6 the distutils log does not log warnings, errors, etc. to
# stderr so we have to wrap it to ensure consistency at least in this
# module
import distutils
class log(object):
def __getattr__(self, attr):
return getattr(distutils.log, attr)
def warn(self, msg, *args):
self._log_to_stderr(distutils.log.WARN, msg, *args)
        def error(self, msg, *args):
self._log_to_stderr(distutils.log.ERROR, msg, *args)
        def fatal(self, msg, *args):
self._log_to_stderr(distutils.log.FATAL, msg, *args)
def log(self, level, msg, *args):
if level in (distutils.log.WARN, distutils.log.ERROR,
distutils.log.FATAL):
self._log_to_stderr(level, msg, *args)
else:
distutils.log.log(level, msg, *args)
def _log_to_stderr(self, level, msg, *args):
# This is the only truly 'public' way to get the current threshold
# of the log
current_threshold = distutils.log.set_threshold(distutils.log.WARN)
distutils.log.set_threshold(current_threshold)
if level >= current_threshold:
if args:
msg = msg % args
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
log = log()
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
| bsd-3-clause |
WheatonCS/Lexos | test/unit_test/test_kmeans_model.py | 1 | 8711 | import numpy as np
import pandas as pd
from lexos.helpers.error_messages import EMPTY_DTM_MESSAGE
from lexos.models.k_means_model import KMeansTestOptions, KMeansModel
from lexos.receivers.k_means_receiver import KMeansOption, KMeansViz, \
KMeansInit
# ------------------------- Voronoi test suite --------------------------------
# Create test DTM for voronoi.
voronoi_dtm = pd.DataFrame(
data=np.array(
[(100, 100, 100, 100, 100, 200, 900, 100),
(100, 200, 200, 100, 300, 100, 600, 100),
(10, 300, 400, 100, 200, 400, 700, 1000),
(100, 400, 100, 100, 100, 100, 100, 100)]
),
index=np.array([0, 1, 2, 3]),
columns=np.array(["A", "B", "C", "D", "E", "F", "G", "H"]))
# Create test id temp label map.
id_temp_label_map_voronoi = \
{0: "F1.txt", 1: "F2.txt", 2: "F3.txt", 3: "F4.txt"}
# Create test front end option for voronoi.
front_end_option_voronoi = KMeansOption(
viz=KMeansViz.voronoi,
n_init=10,
k_value=2,
max_iter=100,
tolerance=1e-4,
init_method=KMeansInit.k_means,
text_color="#000000"
)
# Pack all test components.
test_option_voronoi = KMeansTestOptions(
doc_term_matrix=voronoi_dtm,
front_end_option=front_end_option_voronoi,
document_label_map=id_temp_label_map_voronoi
)
# Create test Model and get test result.
test_voronoi = KMeansModel(test_options=test_option_voronoi)
# noinspection PyProtectedMember
voronoi_result = test_voronoi._get_voronoi_result()
# NO LONGER A ".data" ATTRIBUTE
# ------------------------- Test voronoi plot result --------------------------
# class TestVoronoiPlot:
# # Get plot result.
# plot = voronoi_result
#
# def test_heat_map(self):
# assert self.plot.data[0]["type"] == "heatmap"
# assert self.plot.data[0]["hoverinfo"] == "skip"
#
# def test_centroid(self):
# assert self.plot.data[1]["type"] == "scatter"
# assert self.plot.data[1]["text"] == "Centroid 1"
# assert round(self.plot.data[1]["x"][0], 4) in [738.6971, -246.2324]
# assert round(self.plot.data[1]["y"][0], 4) in [38.3726, -115.1177]
#
# def test_scatter(self):
# assert self.plot.data[3]["mode"] == "markers"
# assert round(self.plot.data[3]["x"][0], 4) in [738.6971, -128.5943]
# assert round(self.plot.data[3]["y"][0], 4) in [411.5624, -115.1177]
#
# -----------------------------------------------------------------------------
# ------------------------- 2D scatter test suite -----------------------------
dtm_two_d = pd.DataFrame(
data=np.array(
[(100, 100, 100, 100, 100, 200, 900, 100),
(100, 200, 200, 100, 300, 100, 600, 100),
(10, 300, 400, 100, 200, 400, 700, 1000),
(100, 400, 100, 100, 100, 100, 100, 100)]
),
index=np.array([0, 1, 2, 3]),
columns=np.array(["A", "B", "C", "D", "E", "F", "G", "H"]))
# Create test id temp label map.
id_temp_label_map_two_d = \
{0: "F1.txt", 1: "F2.txt", 2: "F3.txt", 3: "F4.txt"}
# Create test front end option for 2D.
front_end_option_two_d = KMeansOption(
viz=KMeansViz.two_d,
n_init=10,
k_value=2,
max_iter=100,
tolerance=1e-4,
init_method=KMeansInit.k_means,
text_color="#000000"
)
# Pack all test components.
test_option_two_d = KMeansTestOptions(
doc_term_matrix=dtm_two_d,
front_end_option=front_end_option_two_d,
document_label_map=id_temp_label_map_two_d
)
# Create test Model and get test result.
test_two_d = KMeansModel(test_options=test_option_two_d)
# noinspection PyProtectedMember
two_d_result = test_two_d._get_2d_scatter_result()
# NO LONGER A ".data" ATTRIBUTE
# ------------------------- Test 2D scatter result --------------------------
# class Test2DScatter:
# plot = two_d_result
#
# def test_layout(self):
# assert self.plot.layout["hovermode"] == "closest"
#
# def test_scatter(self):
# assert self.plot.data[0]["type"] == "scatter"
# assert round(self.plot.data[0]["x"][0], 4) in [738.6971, -128.5943]
# assert round(self.plot.data[0]["y"][0], 4) in [411.5624, -115.1177]
# assert self.plot.data[0]["hoverinfo"] == "text"
# assert self.plot.data[0]["mode"] == "markers"
# assert self.plot.data[0]["name"] == "Cluster 1"
#
# assert self.plot.data[1]["type"] == "scatter"
# assert round(self.plot.data[1]["x"][0], 4) in [738.6971, -128.5943]
# assert round(self.plot.data[1]["y"][0], 4) in [411.5624, -115.1177]
# -----------------------------------------------------------------------------
# ------------------------- 3D scatter test suite -----------------------------
dtm_three_d = pd.DataFrame(
data=np.array(
[(100, 100, 100, 100, 100, 200, 900, 100),
(100, 200, 200, 100, 300, 100, 600, 100),
(10, 300, 400, 100, 200, 400, 700, 1000),
(100, 400, 100, 100, 100, 100, 100, 100)]
),
index=np.array([0, 1, 2, 3]),
columns=np.array(["A", "B", "C", "D", "E", "F", "G", "H"]))
# Create test id temp label map.
id_temp_label_map_three_d = \
{0: "F1.txt", 1: "F2.txt", 2: "F3.txt", 3: "F4.txt"}
# Create test front end option for 3D.
front_end_option_three_d = KMeansOption(
viz=KMeansViz.three_d,
n_init=10,
k_value=2,
max_iter=100,
tolerance=1e-4,
init_method=KMeansInit.k_means,
text_color="#000000"
)
# Pack all test components.
test_option_three_d = KMeansTestOptions(
doc_term_matrix=dtm_three_d,
front_end_option=front_end_option_three_d,
document_label_map=id_temp_label_map_three_d
)
# Create test Model and get test result.
test_three_d = KMeansModel(test_options=test_option_three_d)
# noinspection PyProtectedMember
three_d_result = test_three_d._get_3d_scatter_result()
# NO LONGER A ".data" ATTRIBUTE
# ------------------------- 3D scatter test suite -----------------------------
# class Test3DScatter:
# plot = three_d_result
#
# def test_scatter(self):
# assert self.plot.data[0]["type"] == "scatter3d"
# assert round(self.plot.data[0]["x"][0], 4) in [738.6971, -128.5943]
# assert round(self.plot.data[0]["y"][0], 4) in [411.5624, - 115.1177]
# assert round(self.plot.data[0]["z"][0], 4) in [-2.3939, -94.6634]
# assert self.plot.data[0]["hoverinfo"] == "text"
# assert self.plot.data[0]["mode"] == "markers"
# assert self.plot.data[0]["name"] == "Cluster 1"
# assert self.plot.data[1]["type"] == "scatter3d"
# assert round(self.plot.data[1]["x"][0], 4) in [738.6971, -128.5943]
# assert round(self.plot.data[1]["y"][0], 4) in [411.5624, -115.1177]
# assert round(self.plot.data[1]["z"][0], 4) in [-2.3939, -94.6634]
# -----------------------------------------------------------------------------
# ------------------------- Special test suite --------------------------------
# Create test DTM.
dtm_empty = pd.DataFrame()
# Create test id temp label map.
id_temp_label_map_empty = {}
# Create front end option for voronoi.
# noinspection PyTypeChecker
front_end_option_special = KMeansOption(
viz="wrong",
n_init=10,
k_value=2,
max_iter=100,
tolerance=1e-4,
init_method=KMeansInit.k_means,
text_color="#000000"
)
# Pack all test components.
test_option_empty = KMeansTestOptions(
doc_term_matrix=dtm_empty,
front_end_option=front_end_option_special,
document_label_map=id_temp_label_map_empty
)
# Create empty K-Means test.
test_empty = KMeansModel(test_options=test_option_empty)
# -----------------------------------------------------------------------------
# Create dtm special.
dtm_special = pd.DataFrame(data=[[1, 2], [1, 2]],
index=[0, 1],
columns=["A", "B"])
# Create test id temp label map.
id_temp_label_map_special = {0: "F1.txt", 1: "F2.txt"}
# Create special front end option.
test_option_special = KMeansTestOptions(
doc_term_matrix=dtm_special,
front_end_option=front_end_option_special,
document_label_map=id_temp_label_map_special
)
# Create special K-Means test.
test_special = KMeansModel(test_options=test_option_special)
class TestSpecialCase:
def test_empty_dtm(self):
try:
_ = test_empty.get_results()
raise AssertionError("Expected error message did not raise.")
except AssertionError as error:
assert str(error) == EMPTY_DTM_MESSAGE
def test_special_dtm(self):
try:
_ = test_special.get_results()
raise AssertionError("Expected error message did not raise.")
except ValueError as error:
assert \
str(error) == "Invalid K-Means analysis option from front end."
| mit |
f3r/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 face images (we don't care whose they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
drivendataorg/drivendata-submission-validator | drivendata_validator/drivendata_validator.py | 1 | 6477 | #!/usr/bin/python2
from __future__ import print_function
import sys
import json
import pandas as pd
import numpy as np
class DrivenDataValidationError(Exception):
""" Custom Exception class for validation errors that we can anticipate. These messages
are returned to the user. Other unanticipated exceptions get a generic message we pass to the user.
"""
def __init__(self, message, errors=None):
# Call the base class constructor with the parameters it needs
super(DrivenDataValidationError, self).__init__(message)
self.errors = errors
class DrivenDataValidator(object):
""" Validator class.
Accepts a dictionary that is passed to pandas.read_csv -- for options see:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
"""
def __init__(self, **read_csv_kwargs):
self.validation_kwargs = read_csv_kwargs
# default validation kwargs for pd.read_csv() if read_csv_kwargs is empty
if not self.validation_kwargs:
self.validation_kwargs = {
"index_col": 0,
"skipinitialspace": True
}
def validate(self, format_path, submission_path, skip_validating_dataset=False):
""" Validates that a submission is in the proper format
:param format_path: a string that is the path to the submission format file
:param submission_path: a string that is the path to the actual submission to validate
:return: The data frame for the submission if we pass
"""
# load the data
format_df = pd.read_csv(format_path, **self.validation_kwargs)
# automatically validate and return the dataframe if we're comparing something to itself.
# Refs:
        #  - Aristotle's law of identity, 'Metaphysics', 4th century BCE
# - "A is A." - 'Atlas Shrugged', Ayn Rand, 1957 <- lulz
#
if format_path == submission_path:
return format_df
submission_df = pd.read_csv(submission_path, **self.validation_kwargs)
# just return the unadulterated df if we know this is what we're after
if skip_validating_dataset:
return submission_df
# verify that the headers match
if format_df.columns.values.tolist() != submission_df.columns.values.tolist():
error_str = 'CSV Headers do not match. Submission requires that first line is: "{}" You submitted: "{}" '
# get all of the headers
format_headers = [format_df.index.name if format_df.index.name else ""] + \
format_df.columns.values.tolist()
sub_headers = [submission_df.index.name if submission_df.index.name else ""] + \
submission_df.columns.values.tolist()
raise DrivenDataValidationError(error_str.format(",".join(format_headers),
",".join(sub_headers)))
# verify the submission has the proper number of rows
if len(format_df.index) != len(submission_df.index):
error_str = 'Submission has {} rows but should have {}.'
raise DrivenDataValidationError(error_str.format(len(submission_df.index),
len(format_df.index)))
# verify the submission has the right row ids
if np.any(format_df.index.values != submission_df.index.values):
error_str = 'IDs for submission are not correct.'
raise DrivenDataValidationError(error_str)
# verify that the dtypes parse properly
if np.any(format_df.dtypes != submission_df.dtypes):
error_str = "Unexpected data types in submission. " \
"\n Expected dtypes: \t'{}' \n Submitted dtypes: \t'{}'"
raise DrivenDataValidationError(error_str.format(format_df.dtypes.values.tolist(),
submission_df.dtypes.values.tolist()))
# verify that there are no nans if we don't expect any nans (pd.isnull handles all dtypes)
if pd.isnull(submission_df.values).any() and not pd.isnull(format_df.values).any():
error_str = 'Your submission contains NaNs or blanks, which are not expected. Please change these to ' \
'numeric predictions. See ids: {}'
# figure out which rows contain nans
nan_mask = pd.isnull(submission_df.values).astype(int).sum(axis=1).astype(bool)
nan_ids = submission_df.index.values[nan_mask]
raise DrivenDataValidationError(error_str.format(nan_ids.tolist()))
return submission_df
def is_valid(self, format_path, submission_path, print_errors=False):
""" A wrapper around validate to return True/False
"""
try:
self.validate(format_path, submission_path)
return True
except Exception as e:
if print_errors:
print(e)
return False
def main():
# args are submission format, submission file, [optional] kwargs_json
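    # For illustration (hypothetical contents, not from the original source),
    # the optional kwargs JSON file simply mirrors pandas.read_csv keyword
    # arguments, e.g.:
    #   {"index_col": 0, "skipinitialspace": true}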
if len(sys.argv) not in [3, 4]:
print("Usage: python DrivenDataValidator.py <path_to_submission_format_file> "
"<path_to_your_submission_file> [<path_to_pandas_read_csv_kwargs_json>]")
else:
print("Checking all of your ducks to see if they are in a row...\n")
read_csv_kwargs = {}
if len(sys.argv) == 4:
with open(sys.argv[3], "r") as json_file:
read_csv_kwargs = json.load(json_file)
try:
validator = DrivenDataValidator(**read_csv_kwargs)
validator.validate(sys.argv[1], sys.argv[2])
print("Nice work, amig[a|o] Your submission is valid. Submit it on www.drivendata.org!")
except DrivenDataValidationError as anticipated_error:
print("Caught anticipated error. Fix the below and retry.")
print("--------------------------------------------------")
print(anticipated_error)
except Exception as e:
print("Unanticipated error. What have you done??")
print("-----------------------------------------")
# re-raise so y'all can read the trace
raise
if __name__ == "__main__": # pragma: no cover (NB: tested with subprocess, so we don't see the coverage)
main()
| mit |
erikrose/more-itertools | more_itertools/more.py | 1 | 96023 | import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import partial, wraps
from heapq import merge, heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from math import exp, floor, log
from random import random, randrange, uniform
from operator import itemgetter, sub, gt, lt
from sys import maxsize
from time import monotonic
from .recipes import consume, flatten, powerset, take, unique_everseen
__all__ = [
'adjacent',
'always_iterable',
'always_reversible',
'bucket',
'chunked',
'circular_shifts',
'collapse',
'collate',
'consecutive_groups',
'consumer',
'count_cycle',
'difference',
'distinct_combinations',
'distinct_permutations',
'distribute',
'divide',
'exactly_n',
'filter_except',
'first',
'groupby_transform',
'ilen',
'interleave_longest',
'interleave',
'intersperse',
'islice_extended',
'iterate',
'ichunked',
'last',
'locate',
'lstrip',
'make_decorator',
'map_except',
'map_reduce',
'nth_or_last',
'numeric_range',
'one',
'only',
'padded',
'partitions',
'set_partitions',
'peekable',
'repeat_last',
'replace',
'rlocate',
'rstrip',
'run_length',
'sample',
'seekable',
'SequenceView',
'side_effect',
'sliced',
'sort_together',
'split_at',
'split_after',
'split_before',
'split_when',
'split_into',
'spy',
'stagger',
'strip',
'substrings',
'substrings_indexes',
'time_limited',
'unique_to_each',
'unzip',
'windowed',
'with_iter',
'zip_offset',
]
_marker = object()
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
"""
return iter(partial(take, n, iter(iterable)), [])
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration:
# I'm on the edge about raising ValueError instead of StopIteration. At
# the moment, ValueError wins, because the caller could conceivably
# want to do something different with flow control when I raise the
# exception, and it's weird to explicitly catch StopIteration.
if default is _marker:
raise ValueError(
'first() was called on an empty iterable, and no '
'default value was provided.'
)
return default
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
try:
# Try to access the last item directly
return iterable[-1]
except (TypeError, AttributeError, KeyError):
# If not slice-able, iterate entirely using length-1 deque
return deque(iterable, maxlen=1)[0]
except IndexError: # If the iterable was empty
if default is _marker:
raise ValueError(
'last() was called on an empty iterable, and no '
'default value was provided.'
)
return default
def nth_or_last(iterable, n, default=_marker):
"""Return the nth or the last item of *iterable*,
or *default* if *iterable* is empty.
>>> nth_or_last([0, 1, 2, 3], 2)
2
>>> nth_or_last([0, 1], 2)
1
>>> nth_or_last([], 0, 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
return last(islice(iterable, n + 1), default=default)
class peekable:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
    >>> if not p:  # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
def collate(*iterables, **kwargs):
"""Return a sorted merge of the items from each of several already-sorted
*iterables*.
>>> list(collate('ACDZ', 'AZ', 'JKL'))
['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
Works lazily, keeping only the next value from each iterable in memory. Use
    :func:`collate` to, for example, perform an n-way mergesort of items that
don't fit in memory.
If a *key* function is specified, the iterables will be sorted according
to its result:
>>> key = lambda s: int(s) # Sort by numeric value, not by string
>>> list(collate(['1', '10'], ['2', '11'], key=key))
['1', '2', '10', '11']
If the *iterables* are sorted in descending order, set *reverse* to
``True``:
>>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
[5, 4, 3, 2, 1, 0]
If the elements of the passed-in iterables are out of order, you might get
unexpected results.
On Python 3.5+, this function is an alias for :func:`heapq.merge`.
"""
warnings.warn(
"collate is no longer part of more_itertools, use heapq.merge",
DeprecationWarning,
)
return merge(*iterables, **kwargs)
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration:
raise too_short or ValueError('too few items in iterable (expected 1)')
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def distinct_permutations(iterable):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
"""
def make_new_permutations(pool, e):
"""Internal helper function.
The output permutations are built up by adding element *e* to the
current *permutations* at every possible position.
The key idea is to keep repeated elements (reverse) ordered:
if e1 == e2 and e1 is before e2 in the iterable, then all permutations
with e1 before e2 are ignored.
"""
for perm in pool:
for j in range(len(perm)):
yield perm[:j] + (e,) + perm[j:]
if perm[j] == e:
break
else:
yield perm + (e,)
permutations = [()]
for e in iterable:
permutations = make_new_permutations(permutations, e)
return (tuple(t) for t in permutations)
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
        # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
        # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values::
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
it = iter(seq)
window = deque([], n)
append = window.append
# Initial deque fill
for _ in range(n):
append(next(it, fillvalue))
yield tuple(window)
# Appending new items to the right causes old items to fall off the left
i = 0
for item in it:
append(item)
i = (i + 1) % step
if i % step == 0:
yield tuple(window)
# If there are items from the iterable in the window, pad with the given
# value and emit them.
if (i % step) and (step - i < n):
for _ in range(step - i):
append(fillvalue)
yield tuple(window)
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
class bucket:
"""Wrap *iterable* and return an object that buckets it iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
>>> sorted(list(s)) # Get the keys
['a', 'b', 'c']
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __iter__(self):
for item in self._it:
item_value = self._key(item)
if self._validator(item_value):
self._cache[item_value].append(item)
yield from self._cache.keys()
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head, chain(head, it)
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels))
or isinstance(node, (str, bytes))
or ((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child, level + 1)
yield from walk(iterable, 0)
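# An illustrative sketch, not from the library: flattening an irregular,
# nested structure with `collapse` while leaving the strings intact.
def _collapse_usage_sketch():
    nested = [1, [2, (3, 4)], 'five', [['six']]]
    return list(collapse(nested))
    # -> [1, 2, 3, 4, 'five', 'six']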
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
yield from chunk
finally:
if after is not None:
after()
def sliced(seq, n):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
If the length of the sequence is not divisible by the requested slice
length, the last slice will be shorter.
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
return takewhile(len, (seq[i : i + n] for i in count(0, n)))
def split_at(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list is delimited by
an item where callable *pred* returns ``True``.
The lists do not include the delimiting items:
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
[[0], [2], [4, 5, 6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item):
yield buf
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
else:
buf.append(item)
yield buf
def split_before(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends just before
an item for which callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield [item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(item)
yield buf
def split_after(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
buf.append(item)
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
if buf:
yield buf
def split_when(iterable, pred, maxsplit=-1):
"""Split *iterable* into pieces based on the output of *pred*.
*pred* should be a function that takes successive pairs of items and
returns ``True`` if the iterable should be split in between them.
For example, to find runs of increasing numbers, split the iterable when
element ``i`` is larger than element ``i + 1``:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
[[1, 2, 3, 3], [2, 5], [2, 4], [2]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
... lambda x, y: x > y, maxsplit=2))
[[1, 2, 3, 3], [2, 5], [2, 4, 2]]
"""
if maxsplit == 0:
yield list(iterable)
return
it = iter(iterable)
try:
cur_item = next(it)
except StopIteration:
return
buf = [cur_item]
for next_item in it:
if pred(cur_item, next_item):
yield buf
if maxsplit == 1:
yield [next_item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(next_item)
cur_item = next_item
yield buf
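# A hedged sketch: grouping timestamps into sessions with `split_when`,
# splitting wherever the gap between readings exceeds an assumed threshold.
def _sessionize_sketch(timestamps, max_gap=30):
    return list(split_when(timestamps, lambda a, b: b - a > max_gap))
    # _sessionize_sketch([0, 5, 9, 50, 52, 99]) -> [[0, 5, 9], [50, 52], [99]]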
def split_into(iterable, sizes):
"""Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable*, the same way that
:func:`itertools.islice` does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be where in a row
from a table, multiple columns represent elements of the same feature
(e.g. a point represented by x, y, z), but the format is not the same for
all columns.
"""
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
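# A small sketch using `split_into` to unpack a fixed-width record; the
# 2-character / 3-character / rest layout is purely hypothetical.
def _fixed_width_sketch(record='TXabc-payload'):
    kind, ident, payload = split_into(record, [2, 3, None])
    return ''.join(kind), ''.join(ident), ''.join(payload)
    # -> ('TX', 'abc', '-payload')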
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
yield from chain(it, repeat(fillvalue))
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def repeat_last(iterable, default=None):
"""After the *iterable* is exhausted, keep yielding its last element.
>>> list(islice(repeat_last(range(3)), 5))
[0, 1, 2, 2, 2]
If the iterable is empty, yield *default* forever::
>>> list(islice(repeat_last(range(0), 42), 5))
[42, 42, 42, 42, 42]
"""
item = _marker
for item in iterable:
yield item
final = default if item is _marker else item
yield from repeat(final)
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
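# An illustrative sketch of fanning tasks out to a fixed number of workers
# with `distribute`; `handle` is an assumed per-task callable.
def _fan_out_sketch(tasks, handle, n_workers=3):
    # Worker i receives every n_workers-th task, starting at offset i.
    return [[handle(t) for t in worker_tasks]
            for worker_tasks in distribute(n_workers, tasks)]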
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
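# A minimal sketch: pairing each reading with the one two steps later via
# `zip_offset`. `readings` is assumed to be a sequence (re-iterable), not a
# one-shot iterator, since it is passed twice.
def _lagged_pairs_sketch(readings):
    return list(zip_offset(readings, readings, offsets=(0, 2)))
    # _lagged_pairs_sketch([1, 2, 3, 4, 5]) -> [(1, 3), (2, 4), (3, 5)]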
def sort_together(iterables, key_list=(0,), reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
return list(
zip(
*sorted(
zip(*iterables), key=itemgetter(*key_list), reverse=reverse
)
)
)
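# A hedged sketch of spreadsheet-style sorting: reorder a name column and a
# score column together, highest score first.
def _rank_sketch(names, scores):
    sorted_scores, sorted_names = sort_together((scores, names), reverse=True)
    return list(zip(sorted_names, sorted_scores))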
def unzip(iterable):
"""The inverse of :func:`zip`, this function disaggregates the elements
of the zipped *iterable*.
The ``i``-th iterable contains the ``i``-th element from each element
of the zipped iterable. The first element is used to determine the
length of the remaining elements.
>>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> letters, numbers = unzip(iterable)
>>> list(letters)
['a', 'b', 'c', 'd']
>>> list(numbers)
[1, 2, 3, 4]
This is similar to using ``zip(*iterable)``, but it avoids reading
*iterable* into memory. Note, however, that this function uses
:func:`itertools.tee` and thus may require significant storage.
"""
head, iterable = spy(iter(iterable))
if not head:
# empty iterable, e.g. zip([], [], [])
return ()
# spy returns a one-length iterable as head
head = head[0]
iterables = tee(iterable, len(head))
def itemgetter(i):
def getter(obj):
try:
return obj[i]
except IndexError:
# basically if we have an iterable like
# iter([(1, 2, 3), (4, 5), (6,)])
# the second unzipped iterable would fail at the third tuple
# since it would try to access tup[1]
# same with the third unzipped iterable and the second tuple
# to support these "improperly zipped" iterables,
# we create a custom itemgetter
# which just stops the unzipped iterables
# at first length mismatch
raise StopIteration
return getter
return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(iter(seq[start:stop]))
return ret
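# An illustrative sketch contrasting `divide` with `distribute`: `divide`
# keeps contiguous runs together, which suits order-sensitive batching.
def _contiguous_batches_sketch(records, n_batches):
    return [list(batch) for batch in divide(n_batches, records)]
    # _contiguous_batches_sketch(range(7), 3) -> [[0, 1, 2], [3, 4], [5, 6]]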
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
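# A small sketch using `adjacent` to keep one line of context around every
# match, similar in spirit to ``grep -C 1``; `matches` is an assumed
# predicate on a single line.
def _context_lines_sketch(lines, matches):
    return [line for keep, line in adjacent(matches, lines) if keep]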
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
"""An extension of :func:`itertools.groupby` that transforms the values of
*iterable* after grouping them.
*keyfunc* is a function used to compute a grouping key for each item.
*valuefunc* is a function for transforming the items after grouping.
>>> iterable = 'AaaABbBCcA'
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: x.lower()
>>> grouper = groupby_transform(iterable, keyfunc, valuefunc)
>>> [(k, ''.join(g)) for k, g in grouper]
[('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]
*keyfunc* and *valuefunc* default to identity functions if they are not
specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
res = groupby(iterable, keyfunc)
return ((k, map(valuefunc, g)) for k, g in res) if valuefunc else res
class numeric_range(abc.Sequence, abc.Hashable):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = iter(numeric_range(start, stop, step))
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
_EMPTY_HASH = hash(range(0, 0))
def __init__(self, *args):
argc = len(args)
if argc == 1:
self._stop, = args
self._start = type(self._stop)(0)
self._step = type(self._stop - self._start)(1)
elif argc == 2:
self._start, self._stop = args
self._step = type(self._stop - self._start)(1)
elif argc == 3:
self._start, self._stop, self._step = args
elif argc == 0:
raise TypeError('numeric_range expected at least '
'1 argument, got {}'.format(argc))
else:
raise TypeError('numeric_range expected at most '
'3 arguments, got {}'.format(argc))
self._zero = type(self._step)(0)
if self._step == self._zero:
raise ValueError('numeric_range() arg 3 must not be zero')
self._growing = self._step > self._zero
self._init_len()
def __bool__(self):
if self._growing:
return self._start < self._stop
else:
return self._start > self._stop
def __contains__(self, elem):
if self._growing:
if self._start <= elem < self._stop:
return (elem - self._start) % self._step == self._zero
else:
if self._start >= elem > self._stop:
return (self._start - elem) % (-self._step) == self._zero
return False
def __eq__(self, other):
if isinstance(other, numeric_range):
empty_self = not bool(self)
empty_other = not bool(other)
if empty_self or empty_other:
return empty_self and empty_other # True if both empty
else:
return (self._start == other._start
and self._step == other._step
and self._get_by_index(-1) == other._get_by_index(-1))
else:
return False
def __getitem__(self, key):
if isinstance(key, int):
return self._get_by_index(key)
elif isinstance(key, slice):
step = self._step if key.step is None else key.step * self._step
if key.start is None or key.start <= -self._len:
start = self._start
elif key.start >= self._len:
start = self._stop
else: # -self._len < key.start < self._len
start = self._get_by_index(key.start)
if key.stop is None or key.stop >= self._len:
stop = self._stop
elif key.stop <= -self._len:
stop = self._start
else: # -self._len < key.stop < self._len
stop = self._get_by_index(key.stop)
return numeric_range(start, stop, step)
else:
raise TypeError(
'numeric range indices must be '
'integers or slices, not {}'.format(type(key).__name__))
def __hash__(self):
if self:
return hash((self._start, self._get_by_index(-1), self._step))
else:
return self._EMPTY_HASH
def __iter__(self):
values = (self._start + (n * self._step) for n in count())
if self._growing:
return takewhile(partial(gt, self._stop), values)
else:
return takewhile(partial(lt, self._stop), values)
def __len__(self):
return self._len
def _init_len(self):
if self._growing:
start = self._start
stop = self._stop
step = self._step
else:
start = self._stop
stop = self._start
step = -self._step
distance = stop - start
if distance <= self._zero:
self._len = 0
else: # distance > 0 and step > 0: regular euclidean division
q, r = divmod(distance, step)
self._len = int(q) + int(r != self._zero)
def __reduce__(self):
return numeric_range, (self._start, self._stop, self._step)
def __repr__(self):
if self._step == 1:
return "numeric_range({}, {})".format(repr(self._start),
repr(self._stop))
else:
return "numeric_range({}, {}, {})".format(repr(self._start),
repr(self._stop),
repr(self._step))
def __reversed__(self):
return iter(numeric_range(self._get_by_index(-1),
self._start - self._step, -self._step))
def count(self, value):
return int(value in self)
def index(self, value):
if self._growing:
if self._start <= value < self._stop:
q, r = divmod(value - self._start, self._step)
if r == self._zero:
return int(q)
else:
if self._start >= value > self._stop:
q, r = divmod(self._start - value, -self._step)
if r == self._zero:
return int(q)
raise ValueError("{} is not in numeric range".format(value))
def _get_by_index(self, i):
if i < 0:
i += self._len
if i < 0 or i >= self._len:
raise IndexError("numeric range object index out of range")
return self._start + i * self._step
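# An illustrative sketch: stepping by Decimal avoids the float-representation
# surprises mentioned in the docstring above.
def _decimal_range_sketch():
    from decimal import Decimal
    return list(numeric_range(Decimal('0.1'), Decimal('0.4'), Decimal('0.1')))
    # -> [Decimal('0.1'), Decimal('0.2'), Decimal('0.3')]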
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
This function is analogous to :func:`str.lstrip`, and is essentially
a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
cache_clear = cache.clear
for x in iterable:
if pred(x):
cache_append(x)
else:
yield from cache
cache_clear()
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
def islice_extended(iterable, *args):
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
"""
s = slice(*args)
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
it = iter(iterable)
if step > 0:
start = 0 if (start is None) else start
if start < 0:
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step]
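# A minimal sketch: taking everything but the last two items from a one-shot
# iterator, something plain :func:`itertools.islice` cannot express.
def _drop_last_two_sketch(iterable):
    return list(islice_extended(iterable, 0, -2))
    # _drop_last_two_sketch(iter('abcde')) -> ['a', 'b', 'c']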
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
Each group of consecutive items is an iterator that shares its source with
*iterable*. When an output group is advanced, the previous group is
no longer available unless its elements are copied (e.g., into a ``list``).
>>> iterable = [1, 2, 11, 12, 21, 22]
>>> saved_groups = []
>>> for group in consecutive_groups(iterable):
... saved_groups.append(list(group)) # Copy group elements
>>> saved_groups
[[1, 2], [11, 12], [21, 22]]
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub, *, initial=None):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`itertools.accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
Since Python 3.8, :func:`itertools.accumulate` can be supplied with an
*initial* keyword argument. If :func:`difference` is called with *initial*
set to something other than ``None``, it will skip the first element when
computing successive differences.
>>> iterable = [100, 101, 103, 106] # accumulate([1, 2, 3], initial=100)
>>> list(difference(iterable, initial=100))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
if initial is not None:
first = []
return chain(first, starmap(func, zip(b, a)))
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
:class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable:
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
By default, the cache grows as the source iterable progresses, so beware of
wrapping very large or infinite iterables. Supply *maxlen* to limit the
size of the cache (this of course limits how far back you can seek).
>>> from itertools import count
>>> it = seekable((str(n) for n in count()), maxlen=2)
>>> next(it), next(it), next(it), next(it)
('0', '1', '2', '3')
>>> list(it.elements())
['2', '3']
>>> it.seek(0)
>>> next(it), next(it), next(it), next(it)
('2', '3', '4', '5')
>>> next(it)
'6'
"""
def __init__(self, iterable, maxlen=None):
self._source = iter(iterable)
if maxlen is None:
self._cache = []
else:
self._cache = deque([], maxlen)
self._index = None
def __iter__(self):
return self
def __next__(self):
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
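# A hedged sketch: peeking at the first few parsed items and then rewinding
# before handing the stream to its real consumer. `take` is the helper
# already used earlier in this file.
def _peek_then_rewind_sketch(stream):
    it = seekable(stream)
    preview = take(3, it)
    it.seek(0)
    return preview, it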
class run_length:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
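# A tiny sketch: round-tripping a bit string through run-length encoding to
# check that encode and decode are inverses for this input.
def _run_length_roundtrip_sketch(bits='0001100000111'):
    pairs = list(run_length.encode(bits))
    return ''.join(run_length.decode(pairs)) == bits  # True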
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
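# An illustrative sketch of `map_reduce` as a one-pass word tally; the
# whitespace tokenization here is deliberately naive.
def _word_tally_sketch(text):
    return map_reduce(
        text.split(), keyfunc=str.lower, valuefunc=lambda word: 1,
        reducefunc=sum,
    )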
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
See :func:`locate` for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
def partitions(iterable):
"""Yield all possible order-perserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
def time_limited(limit_seconds, iterable):
"""
Yield items from *iterable* until *limit_seconds* have passed.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = generator()
>>> list(time_limited(0.1, iterable))
[1, 2]
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
if limit_seconds < 0:
raise ValueError('limit_seconds must be non-negative')
start_time = monotonic()
for item in iterable:
if monotonic() - start_time > limit_seconds:
break
yield item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
and perhaps more.
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
source = iter(iterable)
while True:
# Check to see whether we're at the end of the source iterable
item = next(source, _marker)
if item is _marker:
return
# Clone the source and yield an n-length slice
source, it = tee(chain([item], source))
yield islice(it, n)
# Advance the source iterable
consume(source, n)
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
else:
pool = tuple(iterable)
for i, prefix in unique_everseen(enumerate(pool), key=itemgetter(1)):
for suffix in distinct_combinations(pool[i + 1 :], r - 1):
yield (prefix,) + suffix
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
exceptions = tuple(exceptions)
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
It should be a function that accepts one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
exceptions = tuple(exceptions)
for item in iterable:
try:
yield function(item)
except exceptions:
pass
def _sample_unweighted(iterable, k):
# Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
# "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
# Fill up the reservoir (collection of samples) with the first `k` samples
reservoir = take(k, iterable)
# Generate a random number that's the largest in a sample of k U(0,1) numbers
# Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
W = exp(log(random()) / k)
# The number of elements to skip before changing the reservoir is a random
# number with a geometric distribution. Sample it using random() and logs.
next_index = k + floor(log(random()) / log(1 - W))
for index, element in enumerate(iterable, k):
if index == next_index:
reservoir[randrange(k)] = element
# The new W is the largest in a sample of k U(0, `old_W`) numbers
W *= exp(log(random()) / k)
next_index += floor(log(random()) / log(1 - W)) + 1
return reservoir
def _sample_weighted(iterable, k, weights):
# Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
# "Weighted random sampling with a reservoir".
# Log-transform for numerical stability for weights that are small/large
weight_keys = (log(random()) / weight for weight in weights)
# Fill up the reservoir (collection of samples) with the first `k`
# weight-keys and elements, then heapify the list.
reservoir = take(k, zip(weight_keys, iterable))
heapify(reservoir)
# The number of jumps before changing the reservoir is a random variable
# with an exponential distribution. Sample it using random() and logs.
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
for weight, element in zip(weights, iterable):
if weight >= weights_to_skip:
# The notation here is consistent with the paper, but we store
# the weight-keys in log-space for better numerical stability.
smallest_weight_key, _ = reservoir[0]
t_w = exp(weight * smallest_weight_key)
r_2 = uniform(t_w, 1) # generate U(t_w, 1)
weight_key = log(r_2) / weight
heapreplace(reservoir, (weight_key, element))
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
else:
weights_to_skip -= weight
# Equivalent to [element for weight_key, element in sorted(reservoir)]
return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
"""Return a *k*-length list of elements chosen (without replacement)
from the *iterable*. Like :func:`random.sample`, but works on iterables
of unknown length.
>>> iterable = range(100)
>>> sample(iterable, 5) # doctest: +SKIP
[81, 60, 96, 16, 4]
An iterable with *weights* may also be given:
>>> iterable = range(100)
>>> weights = (i * i + 1 for i in range(100))
    >>> sample(iterable, 5, weights=weights) # doctest: +SKIP
[79, 67, 74, 66, 78]
The algorithm can also be used to generate weighted random permutations.
The relative weight of each item determines the probability that it
appears late in the permutation.
>>> data = "abcdefgh"
>>> weights = range(1, len(data) + 1)
>>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
"""
if k == 0:
return []
iterable = iter(iterable)
if weights is None:
return _sample_unweighted(iterable, k)
else:
weights = iter(weights)
return _sample_weighted(iterable, k, weights)
| mit |
ojengwa/team1 | gui.py | 3 | 2820 | import Tkinter
from Tkinter import Menu
import tkMessageBox
from main import *
import networkx as nx
import matplotlib.pyplot as plt
import sys
from time import clock
reload(sys)
sys.setdefaultencoding("utf-8")
app = None
link = None
depthname = None
def display_gui(web='',dept=1):
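    # Build the Tkinter form (menu bar, URL and depth fields, submit button),
    # pre-filling it with `web` and `dept`, then start the event loop.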
global app, link, depthname
app = Tkinter.Tk()
app.title("LINK ANALYSER")
app.geometry('450x300+200+200')
menubar = Menu(app)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Quit", command=app.quit)
menubar.add_cascade(label="File", menu=filemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About Us", command=aboutProject)
menubar.add_cascade(label="Help", menu=helpmenu)
app.config(menu=menubar)
headertext2 = Tkinter.StringVar()
headertext2.set("")
label10 = Tkinter.Label(app,textvariable=headertext2,height=1)
label10.pack()
headertext = Tkinter.StringVar()
headertext.set("")
label0 = Tkinter.Label(app,textvariable=headertext,height=4)
label0.pack()
labeltext = Tkinter.StringVar()
labeltext.set("Website url")
label1 = Tkinter.Label(app,textvariable=labeltext,height=1)
label1.pack()
url = Tkinter.StringVar(None)
url.set(web)
link = Tkinter.Entry(app,textvariable=url,)
link.pack()
labeltext = Tkinter.StringVar()
labeltext.set("Depth")
label1 = Tkinter.Label(app,textvariable=labeltext,height=1)
label1.pack()
deptvalue = Tkinter.IntVar(None)
deptvalue.set(dept)
depthname = Tkinter.Entry(app,textvariable=deptvalue,text=dept)
depthname.pack()
button1 = Tkinter.Button(app,text="Submit",width=20,command=changeLabel)
button1.pack(side='bottom' ,padx=15,pady=15)
app.mainloop()
def do_get(site,num):
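    # Crawl `site` down to depth `num` via analyse_web (assumed here to return
    # a dict mapping each URL to {'parent': <parent URL or 'root'>, ...}),
    # build a NetworkX graph of the parent->child links, save it to
    # node_colormap.png, print the elapsed time and display the plot.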
start = clock()
(_ROOT, _DEPTH, _BREADTH) = range(3)
print site,num
G=nx.Graph()
crawl = analyse_web(site,num)
for child in crawl:
G.add_node(child)
if crawl[child]['parent'] != 'root':
G.add_edge(crawl[child]['parent'],child)
# display
nx.draw(G,node_size=20,alpha=0.5,node_color="blue", with_labels=True)
#fig, ax = plt.subplots()
plt.savefig("node_colormap.png") # save as png
print ("Total time: "+`clock() - start`)
plt.show()
def changeLabel():
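    # Read the form fields, validate the URL scheme and the depth, then close
    # the window, run the crawl/plot and re-open the form with the same values.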
global app, link, depthname
site = link.get()
num = depthname.get()
if site[:7]!= "http://" and site[:8]!= "https://":
tkMessageBox.showinfo("Error","The url is invalid")
return
elif int(num)<1:
tkMessageBox.showinfo("Error","The depth should be greater than 1")
return
app.destroy()
#display_gui()
#t1.join()
do_get(site,num)
display_gui(site,num)
| mit |
jukkahuhtamaki/crowdfunding-data | scrapecampaign.py | 1 | 1454 | import codecs
from bs4 import BeautifulSoup
with codecs.open('data/01-raw/investments-neptune-click00085.html',
mode='r', encoding="utf-8") as f:
content = f.read()
soup = BeautifulSoup(content)
rows = list()
for html_row in soup.find_all('div', class_='i-funder-row'):
try:
investor_link = html_row.find_all('a')[0]
investor = investor_link.string.strip()
investor_id = investor_link['href']
profile_available = True
except IndexError:
investor = html_row.find('div', class_='i-name').string.strip()
investor_id = None
profile_available = False
time_ago = html_row.find_all('div', class_='i-note')[0].string
time_ago_value = time_ago.split(' ')[0]
time_ago_unit = time_ago.split(' ')[1]
# print days_ago.split(' ')[0]
# print html_row.find_all('span', class_='currency')
try:
amount = html_row.find_all('span',
class_='currency')[0].find('span').string[1:].replace(',','')
amount_disclosed = True
except IndexError:
amount_disclosed = False
amount = None
# # print row
row = [profile_available,investor_id,investor,time_ago,
time_ago_value,time_ago_unit,amount_disclosed,amount]
rows.append(row)
import pandas as pd
sheet = pd.DataFrame(rows,columns=['profile_available','investor_id',
'investor','time_ago','time_ago_value','time_ago_unit','amount_disclosed',
'amount'])
sheet.to_csv('data/02-refined/investments-neptune.csv',encoding='utf-8')
| mit |
harisbal/pandas | pandas/tests/io/msgpack/test_pack.py | 9 | 4922 | # coding: utf-8
import pytest
import struct
from pandas import compat
from pandas.compat import u, OrderedDict
from pandas.io.msgpack import packb, unpackb, Unpacker, Packer
class TestPack(object):
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack(self):
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536,
-1, -32, -33, -128, -129, -32768, -32769,
1.0,
b"", b"a", b"a" * 31, b"a" * 32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1 << 23),
]
for td in test_data:
self.check(td)
def testPackUnicode(self):
test_data = [u(""), u("abcd"), [u("defgh")], u("Русский текст"), ]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
assert re == td
packer = Packer(encoding='utf-8')
data = packer.pack(td)
re = Unpacker(
compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
assert re == td
def testPackUTF32(self):
test_data = [
compat.u(""),
compat.u("abcd"),
[compat.u("defgh")],
compat.u("Русский текст"),
]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
def testPackBytes(self):
test_data = [b"", b"abcd", (b"defgh", ), ]
for td in test_data:
self.check(td)
def testIgnoreUnicodeErrors(self):
re = unpackb(
packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore',
use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack(self):
pytest.raises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'),
encoding='utf-8', use_list=1)
def testStrictUnicodePack(self):
pytest.raises(UnicodeEncodeError, packb, compat.u("abc\xeddef"),
encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(self):
re = unpackb(
packb(
compat.u("abcФФФdef"), encoding='ascii',
unicode_errors='ignore'), encoding='utf-8', use_list=1)
assert re == compat.u("abcdef")
def testNoEncoding(self):
pytest.raises(TypeError, packb, compat.u("abc"), encoding=None)
def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat(self):
assert packb(1.0,
use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
assert packb(
1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
def testArraySize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(self, sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = compat.BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == {i: i * 2 for i in range(size)}
def test_odict(self):
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(
packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist(self):
pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
packer = Packer()
packed = packer.pack_map_pairs(pairlist)
unpacked = unpackb(packed, object_pairs_hook=list)
assert pairlist == unpacked
| bsd-3-clause |
visualfabriq/portalytics | tests/test_account_cluster_transformer.py | 1 | 1437 | import pandas as pd
import random
import pytest
from vf_portalytics.ml_helpers import AccountClusterTransformer
def make_dataset():
train_x = {
'account_banner': [random.choice(['A', 'B', 'C', 'D']) for i in range(100)],
'Var_1': [random.choice([22, 21, 19, 18]) for i in range(100)]
}
train_y = {
'Target_1': [random.uniform(1.7, 2) for i in range(100)]
}
train_x = pd.DataFrame(train_x)
train_y = pd.DataFrame(train_y)
return train_x, train_y
def test_prediction_model_account_clusters():
# Case 1
train_x_c1, train_y_c1 = make_dataset()
cat_feature = ['A', 'B']
transformer_c1 = AccountClusterTransformer(cat_feature)
transformed_x_c1 = transformer_c1.fit_transform(train_x_c1, train_y_c1)
assert all(train_x_c1['Var_1'] == transformed_x_c1['Var_1'])
assert all(transformed_x_c1['cluster'] != 0.0)
# Case 2
train_x_c2, train_y_c2 = make_dataset()
cat_feature_unknown = ['Unknown_Var']
transformer_c2 = AccountClusterTransformer(cat_feature_unknown)
with pytest.raises(KeyError):
transformer_c2.fit_transform(train_x_c2, train_y_c2)
# Case 3
train_x_c3, train_y_c3 = make_dataset()
cat_feature_vf = ['vf_category']
transformer_c3 = AccountClusterTransformer(cat_feature_vf)
transformed_x_c3 = transformer_c3.fit_transform(train_x_c3, train_y_c3)
assert all(transformed_x_c3['cluster'] == 0.0) | gpl-3.0 |
darioizzo/pykep | pykep/trajopt/_indirect.py | 2 | 27069 | import pykep as pk
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class _indirect_base(object):
"""Base class for indirect trajectory optimisation problems.
    All inheriting classes will adopt ``plot_traj``, ``plot_control``, ``pretty`` and ``get_traj``, and have
    to implement the methods ``_plot_traj`` and ``_pretty``.
"""
def __init__(
self, mass, thrust, isp,
mu, freemass, freetime, alpha, bound,
atol, rtol
):
# spacecraft
self.sc = pk.sims_flanagan.spacecraft(mass, thrust, isp)
# indirect leg
self.leg = pk.pontryagin.leg(
sc=self.sc, mu=mu, freemass=freemass, freetime=freetime, alpha=alpha, bound=bound
)
# integration parameters
if all([(isinstance(par, float) or isinstance(par, int)) for par in [atol, rtol]]):
self.atol = float(atol)
self.rtol = float(rtol)
else:
raise TypeError(
"Both atol and rtol must be an instance of either float or int.")
def fitness(self, z):
"""This function will be redefined in the inheriting classes
"""
pass
def _plot_traj(self, z, axes, units):
"""This function will be redefined in the inheriting classes
"""
pass
def _pretty(self, z):
"""This function will be redefined in the inheriting classes
"""
pass
def get_nobj(self):
return 1
def get_nec(self):
return self.leg.nec
def plot_traj(self, z, mark="k", atol=1e-12, rtol=1e-12, units=pk.AU, axes=None, quiver=False, length=1):
"""This function plots the 3 dimensional spacecraft trajectory, given a solution chromosome.
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
- mark (``string``): matplotlib marker.
- atol (``float``, ``int``): absolute integration tolerance.
- rtol (``float``, ``int``): relative integration tolerance.
- units (``float``, ``int``): units by which to scale the trajectory dimensions.
- axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
- quiver (``bool``): when True the thrust is visualized with a quiver plot
- length (``float``): Length of thrust arrow if quiver is True
"""
# set problem
self.fitness(z)
# figure
if axes is None:
fig = plt.figure()
axes = fig.gca(projection='3d')
elif not isinstance(axes, Axes3D):
raise TypeError(
"Axis must be instance of matplotlib.axes._subplots.Axes3DSubplot.")
# problem specifics
self._plot_traj(z, axes, units)
# Sun
axes.scatter([0], [0], [0], color='y')
# leg
self.leg.plot_traj(axes, mark, atol, rtol, units,
quiver=quiver, length=length)
return axes
def plot_control(self, z, mark="k.-", atol=1e-12, rtol=1e-12, axes=None):
"""Plots the control profile of the trajectory, as a function of time.
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
- mark (``string``): matplotlib marker.
- atol (``float``, ``int``): absolute integration tolerance.
- rtol (``float``, ``int``): relative integration tolerance.
- axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
"""
# set problem
self.fitness(z)
# create figure
if axes is None:
fig = plt.figure()
axes = fig.gca()
# leg
self.leg.plot('tof', 'u', mark=mark, atol=atol, rtol=rtol,
xlabel="Time [mjd2000]", ylabel="Throttle [ND]", axes=axes)
return axes
def get_traj(self, z):
"""Retrieves the trajectory information.
::
traj = [[t0, x0, y0, z0, vx0, vy0, vz0, m0, u0, ux0, uy0, uz0]
...
[tf, xf, yf, zf, vxf, vyf, vzf, mf, uf, uxf, uyf, uzf]]
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
"""
# set leg
self.fitness(z)
# get states [t, x, y, z, vx, vy, vz, m, lx, ly, lz, lvx, lvy, lvz, lm,
# umag, ux, uy, uz, H]
x = self.leg.get_states()
# we make sure the throttles have the correct magnitude (get_states
# returns a direction as defined by the primer)
for t in x:
t[-4] *= t[-5]
t[-3] *= t[-5]
t[-2] *= t[-5]
return x
def pretty(self, z):
data = self.get_traj(z)
self._pretty(z)
print("\nSpacecraft Initial Position (m) : [{!r}, {!r}, {!r}]".format(
data[0, 1], data[0, 2], data[0, 3]))
print("Spacecraft Initial Velocity (m/s) : [{!r}, {!r}, {!r}]".format(
data[0, 4], data[0, 5], data[0, 6]))
print("Spacecraft Initial Mass (kg) : {!r}".format(data[0, 7]))
print("Spacecraft Final Position (m) : [{!r}, {!r}, {!r}]".format(
data[-1, 1], data[-1, 2], data[-1, 3]))
print("Spacecraft Final Velocity (m/s): [{!r}, {!r}, {!r}]".format(
data[-1, 4], data[-1, 5], data[-1, 6]))
print("Spacecraft Final Mass (kg) : {!r}".format(data[-1, 7]))
print("Used propellant (kg) : {!r}".format(
data[0, 7] - data[-1, 7]))
class indirect_pt2pt(_indirect_base):
"""
Represents an indirect trajectory optimisation problem between two Cartesian states with heliocentric dynamics.
The class can be used as UDP in pagmo.
The decision chromosome is
::
z = [T, l0]
"""
def __init__(self,
x0=[-51051524893.335152, -142842795180.97464, 1139935.2553601924,
30488.847061907356, -10612.482697050367, -204.23284335657095, 1000],
xf=[24753885674.871033, 231247560000.17883, 4236305010.4256544, -
23171.900670190855, 4635.6817290400222, 666.44019588506023, 910.48383959441833],
thrust=0.3,
isp=3000,
mu=pk.MU_SUN,
tof=[276.15166075931495, 276.15166075931495],
freetime=False,
alpha=0, # quadratic control
bound=False,
atol=1e-12,
rtol=1e-12):
"""
Constructs an instance of the ``pykep.trajopt.indirect_pt2pt`` problem.
Args:
- x0 (``list``, ``tuple``, ``numpy.ndarray``): Departure state [m, m, m, m/s, m/s, m/s, kg].
- xf (``list``, ``tuple``, ``numpy.ndarray``): Arrival state [m, m, m, m/s, m/s, m/s, kg].
- tof (``list``): Transfer time bounds [days].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- mu (``float``): Gravitational parameter of primary body [m^3/s^2].
- freetime (``bool``): Activates final time transversality condition. Allows final time to vary.
- alpha (``float``, ``int``): Homotopy parameter (0 -quadratic control, 1 - mass optimal)
- bound (``bool``): Activates bounded control, in which the control throttle is bounded between 0 and 1, otherwise the control throttle is allowed to unbounded.
- atol (``float``, ``int``): Absolute integration solution tolerance.
- rtol (``float``, ``int``): Relative integration solution tolerance.
"""
# Cartesian states
if not all([(isinstance(x, list) or isinstance(x, tuple) or isinstance(x, np.ndarray)) for x in [x0, xf]]):
raise TypeError(
"Both x0 and xf must be supplied as an instance of either list, tuple, or numpy.ndarray.")
elif not all([len(x) == 7 for x in [x0, xf]]):
raise TypeError(
"Both x0 and xf must be supplied with 7 dimensions.")
else:
self.x0 = pk.sims_flanagan.sc_state()
self.x0.set(x0)
self.xf = pk.sims_flanagan.sc_state()
self.xf.set(xf)
self.tof = tof
# initialise base
_indirect_base.__init__(
self, x0[-1], thrust, isp, mu, True, freetime, alpha, bound,
atol, rtol
)
def fitness(self, z):
# times
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
# costates
l0 = np.asarray(z[1:])
# set leg
self.leg.set(t0, self.x0, l0, tf, self.xf)
# equality constraints
ceq = self.leg.mismatch_constraints(atol=self.atol, rtol=self.rtol)
return np.hstack(([1, ceq]))
def get_bounds(self):
lb = [self.tof[0]] + [-100] * 7
ub = [self.tof[1]] + [100] * 7
return (lb, ub)
def _plot_traj(self, z, axes, units):
# states
x0 = self.leg.x0
xf = self.leg.xf
# times
t0 = pk.epoch(self.leg.t0)
tf = pk.epoch(self.leg.tf)
# Computes the osculating Keplerian elements at start and arrival
elem0 = list(pk.ic2par(x0[0:3], x0[3:6], self.leg.mu))
elemf = list(pk.ic2par(xf[0:3], xf[3:6], self.leg.mu))
        # Converts the eccentric anomaly into the mean anomaly
elem0[5] = elem0[5] - elem0[1] * np.sin(elem0[5])
elemf[5] = elemf[5] - elemf[1] * np.sin(elemf[5])
# Creates two virtual keplerian planets with the said elements
kep0 = pk.planet.keplerian(t0, elem0)
kepf = pk.planet.keplerian(tf, elemf)
# Plots the departure and arrival osculating orbits
pk.orbit_plots.plot_planet(
kep0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axes)
pk.orbit_plots.plot_planet(
kepf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axes)
def _pretty(self, z):
print("\nPoint to point transfer: ")
print("\nFrom: " + str(self.x0))
print("To: " + str(self.xf))
print("Time of flight (days): {!r} ".format(z[0]))
class indirect_or2or(_indirect_base):
"""Represents an indirect trajectory optimisation problem between two orbits.
Decision chromosome is
::
z = [T, M0, Mf, l0]
"""
def __init__(self,
elem0=[149598261129.93335, 0.016711230601231957,
2.640492490927786e-07, 3.141592653589793, 4.938194050401601, 0],
elemf=[227943822376.03537, 0.09339409892101332,
0.032283207367640024, 0.8649771996521327, 5.000312830124232, 0],
mass=1000,
thrust=0.3,
isp=2500,
atol=1e-12,
rtol=1e-12,
tof=[100, 700],
freetime=True,
alpha=0,
bound=False,
mu=pk.MU_SUN):
"""Initialises ``pykep.trajopt.indirect_or2or`` problem.
Args:
- elem0 (``list``, ``tuple``, ``numpy.ndarray``): Departure Keplerian elements (mutable eccentric anomaly).
- elemf (``list``, ``tuple``, ``numpy.ndarray``): Arrival Keplerian elements (mutable eccentric anomaly).
- mass (``float``, ``int``): Spacecraft wet mass [kg].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- atol (``float``, ``int``): Absolute integration solution tolerance.
- rtol (``float``, ``int``): Relative integration solution tolerance.
- tof (``list``): Transfer time bounds [days].
- freetime (``bool``): Activates final time transversality condition. Allows final time to vary.
- alpha (``float``, ``int``): Homotopy parameter, governing the degree to which the theoretical control law is intended to reduce propellant expenditure or energy.
- bound (``bool``): Activates bounded control, in which the control throttle is bounded between 0 and 1, otherwise the control throttle is allowed to unbounded.
- mu (``float``): Gravitational parameter of primary body [m^3/s^2].
"""
# initialise base
_indirect_base.__init__(
self, mass, thrust, isp, mu, True, freetime, alpha, bound,
atol, rtol
)
# Keplerian elements
self.elem0 = np.asarray(elem0)
self.elemf = np.asarray(elemf)
# Time of flight bounds
self.tof = tof
def fitness(self, z):
# departure and arrival times
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
        # departure and arrival eccentric anomalies
M0 = z[1]
Mf = z[2]
# departure costates
l0 = np.asarray(z[3:])
# set Keplerian elements
elem0 = np.hstack([self.elem0[:5], [M0]])
elemf = np.hstack([self.elemf[:5], [Mf]])
# compute Cartesian states
r0, v0 = pk.par2ic(elem0, self.leg.mu)
rf, vf = pk.par2ic(elemf, self.leg.mu)
# departure and arrival states (xf[6] is unused)
x0 = pk.sims_flanagan.sc_state(r0, v0, self.sc.mass)
xf = pk.sims_flanagan.sc_state(rf, vf, self.sc.mass / 10)
# set leg
self.leg.set(t0, x0, l0, tf, xf)
# equality constraints
ceq = self.leg.mismatch_constraints(atol=self.atol, rtol=self.rtol)
# final mass
obj = self.leg.trajectory[-1, -1]
# Transversality conditions
# At start
lambdas0 = np.array(self.leg.trajectory[0, 7:13])
r0norm = np.sqrt(r0[0] * r0[0] + r0[1] * r0[1] + r0[2] * r0[2])
tmp = - pk.MU_SUN / r0norm**3
tangent = np.array([v0[0], v0[1], v0[2], tmp *
r0[0], tmp * r0[1], tmp * r0[2]])
tangent_norm = np.linalg.norm(tangent)
tangent = tangent / tangent_norm
T0 = np.dot(lambdas0, tangent)
# At end
lambdasf = np.array(self.leg.trajectory[-1, 7:13])
rfnorm = np.sqrt(rf[0] * rf[0] + rf[1] * rf[1] + rf[2] * rf[2])
tmp = - pk.MU_SUN / rfnorm**3
tangent = np.array([vf[0], vf[1], vf[2], tmp *
rf[0], tmp * rf[1], tmp * rf[2]])
tangent_norm = np.linalg.norm(tangent)
tangent = tangent / tangent_norm
Tf = np.dot(lambdasf, tangent)
return np.hstack(([1], ceq, [T0, Tf]))
def get_nec(self):
return self.leg.nec + 2
def get_bounds(self):
lb = [self.tof[0], -4 * np.pi, -4 * np.pi] + [-1e2] * 7
ub = [self.tof[1], 4 * np.pi, 4 * np.pi] + [1e2] * 7
return (lb, ub)
def _plot_traj(self, z, axes, units):
"""Plots spacecraft trajectory.
Args:
- z (``tuple``, ``list``, ``numpy.ndarray``): Decision chromosome.
- axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
- units (``float``, ``int``): Length unit by which to normalise data.
Examples:
>>> prob.extract(pykep.trajopt.indirect_or2or).plot_traj(pop.champion_x)
"""
# times
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
# Mean Anomalies
M0 = z[1] - self.elem0[1] * np.sin(z[1])
Mf = z[2] - self.elemf[1] * np.sin(z[2])
elem0 = np.hstack([self.elem0[:5], [M0]])
elemf = np.hstack([self.elemf[:5], [Mf]])
# Keplerian points
kep0 = pk.planet.keplerian(t0, elem0)
kepf = pk.planet.keplerian(tf, elemf)
# planets
pk.orbit_plots.plot_planet(
kep0, t0=t0, units=units, axes=axes, color=(0.8, 0.8, 0.8))
pk.orbit_plots.plot_planet(
kepf, t0=tf, units=units, axes=axes, color=(0.8, 0.8, 0.8))
def _pretty(self, z):
print("\nOrbit to orbit transfer: ")
print("\nFrom: " + str(list(self.elem0)))
print("To: " + str(list(self.elemf)))
print("Time of flight (days): {!r} ".format(z[0]))
print("Starting mean anomaly (rad): {!r} ".format(z[1]))
print("Arrival mean anomaly (rad): {!r} ".format(z[2]))
class indirect_pt2or(_indirect_base):
"""Represents an indirect trajectory optimisation problem between a Cartesian state and an orbit.
Decision chromosome is
::
z = [T, Mf, l0]
"""
def __init__(self, x0, elemf, mass, thrust, isp, atol, rtol, tof, freetime=True, alpha=1, bound=True, mu=pk.MU_SUN):
"""Initialises ``pykep.trajopt.indirect_pt2or`` problem.
Args:
- x0 (``list``, ``tuple``, ``numpy.ndarray``): Departure state [m, m, m, m/s, m/s, m/s, kg].
- elemf (``list``, ``tuple``, ``numpy.ndarray``): Arrival Keplerian elements SI units. (mean anomaly will be changed).
- mass (``float``, ``int``): Spacecraft wet mass [kg].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- atol (``float``, ``int``): Absolute integration solution tolerance.
- rtol (``float``, ``int``): Relative integration solution tolerance.
- tof (``list``): Transfer time bounds [days].
- freetime (``bool``): Activates final time transversality condition. Allows final time to vary.
- alpha (``float``, ``int``): Homotopy parameter, governing the degree to which the theoretical control law is intended to reduce propellant expenditure or energy.
- bound (``bool``): Activates bounded control, in which the control throttle is bounded between 0 and 1, otherwise the control throttle is allowed to unbounded.
- mu (``float``): Gravitational parameter of primary body [m^3/s^2].
"""
# initialise base
_indirect_base.__init__(
self, mass, thrust, isp, mu, True, freetime, alpha, bound,
atol, rtol
)
# departure state and arrival Keplerian elements
self.x0 = np.asarray(x0, np.float64)
self.elemf = np.asarray(elemf, np.float64)
self.tof = tof
def fitness(self, z):
# times
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
# final eccentric anomaly
Mf = z[1]
# initial costates
l0 = np.asarray(z[2:])
# set arrival Keplerian elements
self.elemf[5] = Mf
# departure state
x0 = pk.sims_flanagan.sc_state(self.x0[0:3], self.x0[3:6], self.x0[6])
# compute Cartesian arrival state
rf, vf = pk.par2ic(self.elemf, self.leg.mu)
xf = pk.sims_flanagan.sc_state(rf, vf, self.sc.mass / 10)
# set leg
self.leg.set(t0, x0, l0, tf, xf)
# equality constraints
ceq = self.leg.mismatch_constraints(atol=self.atol, rtol=self.rtol)
# final mass
# mf = self.leg.trajectory[-1, 6]
# Transversality condition at the end
lambdasf = np.array(self.leg.trajectory[-1, 7:13])
rfnorm = np.sqrt(rf[0] * rf[0] + rf[1] * rf[1] + rf[2] * rf[2])
tmp = - pk.MU_SUN / rfnorm**3
tangent = np.array([vf[0], vf[1], vf[2], tmp *
rf[0], tmp * rf[1], tmp * rf[2]])
tangent_norm = np.linalg.norm(tangent)
tangent = tangent / tangent_norm
Tf = np.dot(lambdasf, tangent)
return np.hstack(([1], ceq, [Tf]))
def get_nec(self):
return self.leg.nec + 1
def get_bounds(self):
lb = [self.tof[0], -4 * np.pi] + [-1e2] * 7
ub = [self.tof[1], 4 * np.pi] + [1e2] * 7
return (lb, ub)
def _plot_traj(self, z, axes, units=pk.AU):
"""Plots spacecraft trajectory.
Args:
- z (``tuple``, ``list``, ``numpy.ndarray``): Decision chromosome.
- axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
- units (``float``, ``int``): Length unit by which to normalise data.
Examples:
>>> prob.extract(pykep.trajopt.indirect_pt2or).plot_traj(pop.champion_x)
"""
# times
t0 = pk.epoch(0)
        tf = pk.epoch(z[0])
# Keplerian elements of the osculating orbit at start
elem0 = list(pk.ic2par(self.x0[0:3], self.x0[3:6], self.leg.mu))
# Eccentric to Mean Anomaly
elem0[5] = elem0[5] - elem0[1] * np.sin(elem0[5])
# Mean Anomaly at the target orbit
Mf = z[1] - self.elemf[1] * np.sin(z[1])
elemf = np.hstack([self.elemf[:5], [Mf]])
# Keplerian elements
kep0 = pk.planet.keplerian(t0, elem0)
        kepf = pk.planet.keplerian(tf, elemf)
# plot departure and arrival
pk.orbit_plots.plot_planet(
kep0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axes)
pk.orbit_plots.plot_planet(
kepf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axes)
def _pretty(self, z):
"""
prob.pretty(x)
Args:
- x (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
Prints human readable information on the trajectory represented by the decision vector x
"""
print("\nPoint to orbit transfer: ")
print("\nFrom (cartesian): ", list(self.x0))
print("To (osculating elements): ", list(self.elemf))
print("Time of flight (days): {!r} ".format(z[0]))
print("Arrival mean anomaly (rad): {!r} ".format(z[1]))
class indirect_pt2pl(_indirect_base):
"""
Represents an indirect trajectory optimisation problem between a Cartesian state and a planet (rendezvous).
Since the terminal conditions on the planet are not fixed, the transversality condition H=0 is deactivated
and optimization of T happens via an explicit minimization of the objective (hybrid direct-indirect method)
Decision chromosome is
::
z = [T, l0]
"""
def __init__(self,
x0=[-24482087316.947845, -150000284705.77328, -196089391.29376224,
31677.87649549203, -5859.747563624047, -351.75278222719828, 1000],
pf="mars",
mass=1000,
thrust=0.3,
isp=3000,
tof=[230, 280],
t0=1251.0286746844447,
mu=pk.MU_SUN,
alpha=0,
bound=False,
atol=1e-12,
rtol=1e-12
):
"""Initialises ``pykep.trajopt.indirect_pt2or`` problem.
Args:
- x0 (``list``, ``tuple``, ``numpy.ndarray``): Departure state [m, m, m, m/s, m/s, m/s, kg].
            - pf (``str``): Arrival planet name (will be used to construct a planet.jpl_lp object).
            - mass (``float``, ``int``): Spacecraft wet mass [kg].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- atol (``float``, ``int``): Absolute integration solution tolerance.
- rtol (``float``, ``int``): Relative integration solution tolerance.
- tof (``list``): Transfer time bounds [days].
- t0 (``float``): launch epoch [MJD2000].
- freetime (``bool``): Activates final time transversality condition. Allows final time to vary.
- alpha (``float``, ``int``): Homotopy parameter, governing the degree to which the theoretical control law is intended to reduce propellant expenditure or energy.
- bound (``bool``): Activates bounded control, in which the control throttle is bounded between 0 and 1, otherwise the control throttle is allowed to unbounded.
- mu (``float``): Gravitational parameter of primary body [m^3/s^2].
"""
# initialise base
_indirect_base.__init__(
self, mass, thrust, isp, mu, True, False, alpha, bound,
atol, rtol
)
# departure epoch
self.t0 = pk.epoch(t0)
# departure state
self.x0 = np.asarray(x0, np.float64)
# arrival planet
self.pf = pk.planet.jpl_lp(pf)
# bounds on the time of flight
self.tof = tof
# store the alfa value (immutable)
self._alpha = alpha
def fitness(self, z):
# times
t0 = self.t0
tf = pk.epoch(t0.mjd2000 + z[0])
# intial costates
l0 = np.asarray(z[1:])
# arrival conditions
rf, vf = self.pf.eph(tf)
# departure state
x0 = pk.sims_flanagan.sc_state(self.x0[0:3], self.x0[3:6], self.x0[6])
# arrival state (mass will be ignored)
xf = pk.sims_flanagan.sc_state(rf, vf, self.sc.mass / 10)
# set leg
self.leg.set(t0, x0, l0, tf, xf)
# equality constraints
ceq = self.leg.mismatch_constraints(atol=self.atol, rtol=self.rtol)
obj = self.leg.trajectory[-1, -1] * self.leg._dynamics.c2 * 1000
return np.hstack(([obj], ceq))
def get_bounds(self):
lb = [self.tof[0]] + [-1e2] * 7
ub = [self.tof[1]] + [1e2] * 7
return (lb, ub)
def _plot_traj(self, z, axes, units=pk.AU):
"""Plots spacecraft trajectory.
Args:
- z (``tuple``, ``list``, ``numpy.ndarray``): Decision chromosome.
- axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
- units (``float``, ``int``): Length unit by which to normalise data.
Examples:
            >>> prob.extract(pykep.trajopt.indirect_pt2pl).plot_traj(pop.champion_x)
"""
# states
x0 = self.x0
# times
t0 = self.t0
tf = pk.epoch(t0.mjd2000 + z[0])
# Computes the osculating Keplerian elements at start
elem0 = list(pk.ic2par(x0[0:3], x0[3:6], self.leg.mu))
        # Converts the eccentric anomaly into the mean anomaly
elem0[5] = elem0[5] - elem0[1] * np.sin(elem0[5])
# Creates a virtual keplerian planet with the said elements
kep0 = pk.planet.keplerian(t0, elem0)
# Plots the departure and arrival osculating orbits
pk.orbit_plots.plot_planet(
kep0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axes)
pk.orbit_plots.plot_planet(
self.pf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axes)
def _pretty(self, z):
print("\nPlanet to orbit transfer, alpha is: ", self._alpha)
print("\nFrom (cartesian): " + str(list(self.x0)))
print("Launch epoch: {!r} MJD2000, a.k.a. {!r}".format(
self.t0.mjd2000, self.t0))
print("\nTo (planet): " + self.pf.name)
print("Time of flight (days): {!r} ".format(z[0]))
| gpl-3.0 |
LiaoPan/blaze | blaze/compute/tests/test_sparksql.py | 3 | 14017 | from __future__ import absolute_import, print_function, division
import pytest
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
sa = pytest.importorskip('sqlalchemy')
import os
import itertools
import shutil
from py4j.protocol import Py4JJavaError
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from blaze import compute, symbol, into, by, sin, exp, cos, tan, join
try:
from pyspark.sql import DataFrame as SparkDataFrame
except ImportError:
from pyspark.sql import SchemaRDD as SparkDataFrame
from pyspark import HiveContext, SQLContext
from pyspark.sql import Row, SchemaRDD
from odo import odo, discover
from odo.utils import tmpfile
data = [['Alice', 100.0, 1],
['Bob', 200.0, 2],
['Alice', 50.0, 3]]
date_data = []
np.random.seed(0)
for attr in ('YearBegin', 'MonthBegin', 'Day', 'Hour', 'Minute', 'Second'):
rng = pd.date_range(start='now', periods=len(data),
freq=getattr(pd.datetools, attr)()).values
date_data += list(zip(np.random.choice(['Alice', 'Bob', 'Joe', 'Lester'],
size=len(data)),
np.random.rand(len(data)) * 100,
np.random.randint(100, size=3),
rng))
cities_data = [['Alice', 'NYC'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
date_df = pd.DataFrame(date_data, columns=['name', 'amount', 'id', 'ds'])
cities_df = pd.DataFrame(cities_data, columns=['name', 'city'])
# sc is from conftest.py
@pytest.yield_fixture(scope='module')
def sql(sc):
try:
if hasattr(pyspark.sql, 'types'): # pyspark >= 1.3
yield HiveContext(sc)
else:
yield SQLContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath)
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath)
os.remove(logpath)
@pytest.yield_fixture(scope='module')
def people(sc):
with tmpfile('.txt') as fn:
df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2])))
@pytest.yield_fixture(scope='module')
def cities(sc):
with tmpfile('.txt') as fn:
cities_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0], city=person[1]))
@pytest.yield_fixture(scope='module')
def date_people(sc):
with tmpfile('.txt') as fn:
date_df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2]),
ds=pd.Timestamp(person[3]).to_pydatetime()))
@pytest.fixture(scope='module')
def ctx(sql, people, cities, date_people):
try:
sql.registerDataFrameAsTable(sql.createDataFrame(people), 't')
sql.cacheTable('t')
sql.registerDataFrameAsTable(sql.createDataFrame(cities), 's')
sql.cacheTable('s')
sql.registerDataFrameAsTable(sql.createDataFrame(date_people), 'dates')
sql.cacheTable('dates')
except AttributeError:
sql.inferSchema(people).registerTempTable('t')
sql.inferSchema(cities).registerTempTable('s')
sql.inferSchema(date_people).registerTempTable('dates')
return sql
@pytest.fixture(scope='module')
def db(ctx):
return symbol('db', discover(ctx))
def test_projection(db, ctx):
expr = db.t[['id', 'name']]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_symbol_compute(db, ctx):
assert isinstance(compute(db.t, ctx), (SparkDataFrame, SchemaRDD))
def test_field_access(db, ctx):
for field in db.t.fields:
expr = getattr(db.t, field)
result = into(pd.Series, compute(expr, ctx))
expected = compute(expr, {db: {'t': df}})
assert result.name == expected.name
np.testing.assert_array_equal(result.values,
expected.values)
def test_head(db, ctx):
expr = db.t[['name', 'amount']].head(2)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result) == into(list, expected)
def test_literals(db, ctx):
expr = db.t[db.t.amount >= 100]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_by_summary(db, ctx):
t = db.t
expr = by(t.name, mymin=t.amount.min(), mymax=t.amount.max())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result) == into(set, expected)
def test_join(db, ctx):
expr = join(db.t, db.s)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert isinstance(result, (SparkDataFrame, SchemaRDD))
assert into(set, result) == into(set, expected)
assert discover(result) == expr.dshape
def test_join_diff_contexts(db, ctx, cities):
expr = join(db.t, db.s, 'name')
people = ctx.table('t')
cities = into(ctx, cities, dshape=discover(ctx.table('s')))
scope = {db: {'t': people, 's': cities}}
result = compute(expr, scope)
expected = compute(expr, {db: {'t': df, 's': cities_df}})
assert (set(map(frozenset, odo(result, set))) ==
set(map(frozenset, odo(expected, set))))
def test_field_distinct(ctx, db):
expr = db.t.name.distinct()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_boolean(ctx, db):
expr = db.t.amount > 50
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
def test_selection(ctx, db):
expr = db.t[db.t.amount > 50]
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_selection_field(ctx, db):
expr = db.t[db.t.amount > 50].name
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(['field', 'reduction'],
itertools.product(['id', 'amount'], ['sum', 'max',
'min', 'mean',
'count',
'nunique']))
def test_reductions(ctx, db, field, reduction):
expr = getattr(db.t[field], reduction)()
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(list, result)[0][0] == expected
def test_column_arithmetic(ctx, db):
expr = db.t.amount + 1
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
# pyspark doesn't use __version__ so we use this kludge
# should submit a bug report upstream to get __version__
def fail_on_spark_one_two(x):
if hasattr(pyspark.sql, 'types'):
return x
else:
return pytest.mark.xfail(x, raises=py4j.protocol.Py4JJavaError,
reason=('math functions only supported in '
'HiveContext'))
@pytest.mark.parametrize('func', list(map(fail_on_spark_one_two,
[sin, cos, tan, exp])))
def test_math(ctx, db, func):
expr = func(db.t.amount)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
np.testing.assert_allclose(np.sort(odo(result, np.ndarray,
dshape=expr.dshape)),
np.sort(odo(expected, np.ndarray)))
@pytest.mark.parametrize(['field', 'ascending'],
itertools.product(['name', 'id', ['name', 'amount']],
[True, False]))
def test_sort(ctx, db, field, ascending):
expr = db.t.sort(field, ascending=ascending)
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
@pytest.mark.xfail
def test_map(ctx, db):
expr = db.t.id.map(lambda x: x + 1, 'int')
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert into(set, result, dshape=expr.dshape) == into(set, expected)
@pytest.mark.parametrize(['grouper', 'reducer', 'reduction'],
itertools.chain(itertools.product(['name', 'id',
['id', 'amount']],
['id', 'amount'],
['sum', 'count',
'max', 'min',
'mean',
'nunique']),
[('name', 'name', 'count'),
('name', 'name', 'nunique')]))
def test_by(ctx, db, grouper, reducer, reduction):
t = db.t
expr = by(t[grouper], total=getattr(t[reducer], reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert (set(map(frozenset, into(list, result))) ==
set(map(frozenset, into(list, expected))))
@pytest.mark.parametrize(['reducer', 'reduction'],
itertools.product(['id', 'name'],
['count', 'nunique']))
def test_multikey_by(ctx, db, reducer, reduction):
t = db.t
expr = by(t[['id', 'amount']], total=getattr(getattr(t, reducer),
reduction)())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert (set(map(frozenset, into(list, result))) ==
set(map(frozenset, into(list, expected))))
def test_grouper_with_arith(ctx, db):
expr = by(db.t[['id', 'amount']], total=(db.t.amount + 1).sum())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
def test_by_non_native_ops(ctx, db):
expr = by(db.t.id, total=db.t.id.nunique())
result = compute(expr, ctx)
expected = compute(expr, {db: {'t': df}})
assert list(map(set, into(list, result))) == list(map(set, into(list,
expected)))
@pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
reason=('length string function not available without '
'HiveContext'),
raises=py4j.protocol.Py4JJavaError)
def test_strlen(ctx, db):
expr = db.t.name.strlen()
result = odo(compute(expr, ctx), pd.Series)
expected = compute(expr, {db: {'t': df}})
assert result.name == 'name'
assert expected.name == 'name'
assert odo(result, set) == odo(expected, set)
date_attrs = [pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
attr,
raises=(Py4JJavaError, AssertionError),
reason=('date attribute %r not supported '
'without hive') % attr)
for attr in ['year', 'month', 'day', 'hour', 'minute', 'second']]
date_attrs += [pytest.mark.xfail(attr,
raises=Py4JJavaError,
reason=('Hive does not support date '
'attribute %r') % attr)
for attr in ['millisecond', 'microsecond']]
@pytest.mark.parametrize('attr', date_attrs)
def test_by_with_date(ctx, db, attr):
# TODO: investigate CSV writing precision between pandas 0.16.0 and 0.16.1
# TODO: see if we can use odo to convert the dshape of an existing
# DataFrame
expr = by(getattr(db.dates.ds, attr),
mean=db.dates.amount.mean())
result = odo(compute(expr, ctx), pd.DataFrame).sort('mean').reset_index(drop=True)
expected = compute(expr, {db: {'dates': date_df}}).sort('mean').reset_index(drop=True)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('keys', [[1], [1, 2]])
def test_isin(ctx, db, keys):
expr = db.t[db.t.id.isin(keys)]
result = odo(compute(expr, ctx), set)
expected = odo(compute(expr, {db: {'t': df}}), set)
assert (set(map(frozenset, odo(result, list))) ==
set(map(frozenset, odo(expected, list))))
def test_nunique_spark_dataframe(ctx, db):
assert (odo(compute(db.t.nunique(), ctx), int) ==
ctx.table('t').distinct().count())
| bsd-3-clause |
PmagPy/PmagPy | programs/strip_magic.py | 2 | 14657 | #!/usr/bin/env python
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
strip_magic.py
DESCRIPTION
plots various parameters versus depth or age
SYNTAX
strip_magic.py [command line optins]
OPTIONS
-h prints help message and quits
-DM NUM: specify data model num, options 2 (legacy) or 3 (default)
-f FILE: specify input magic format file from magic,default='pmag_results.txt'
supported types=[pmag_specimens, pmag_samples, pmag_sites, pmag_results, magic_web]
-obj [sit,sam,all]: specify object to site,sample,all for pmag_result table, default is all
-fmt [svg,png,jpg], format for images - default is svg
-x [age,pos]: specify whether age or stratigraphic position
-y [dec,inc,int,chi,lat,lon,vdm,vadm]
(lat and lon are VGP lat and lon)
-Iex: plot the expected inc at lat - only available for results with lat info in file
-ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
TS: [ck95, gts04]
-mcd method_code, specify method code, default is first one encountered
-sav save plot and quit
NOTES
when x and/or y are not specified, a list of possibilities will be presented to the user for choosing
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
xaxis, xplotind, yplotind = "", 0, 0 # (0 for strat pos)
yaxis, Xinc = "", ""
plot = 0
obj = 'all'
data_model_num = int(pmag.get_named_arg("-DM", 3))
# 2.5 keys
if data_model_num == 2:
supported = ['pmag_specimens', 'pmag_samples',
'pmag_sites', 'pmag_results', 'magic_web'] # available file types
Depth_keys = ['specimen_core_depth', 'specimen_height', 'specimen_elevation',
'specimen_composite_depth', 'sample_core_depth', 'sample_height',
'sample_elevation', 'sample_composite_depth', 'site_core_depth',
'site_height', 'site_elevation', 'site_composite_depth', 'average_height']
Age_keys = ['specimen_inferred_age', 'sample_inferred_age',
'site_inferred_age', 'average_age']
Unit_keys = {'specimen_inferred_age': 'specimen_inferred_age_unit',
'sample_inferred_age': 'sample_inferred_age_unit',
'site_inferred_age': 'site_inferred_age_unit', 'average_age': 'average_age_unit'}
Dec_keys = ['measurement_dec', 'specimen_dec',
'sample_dec', 'site_dec', 'average_dec']
Inc_keys = ['measurement_inc', 'specimen_inc',
'sample_inc', 'site_inc', 'average_inc']
Int_keys = ['measurement_magnitude', 'measurement_magn_moment', 'measurement_magn_volume',
'measurement_magn_mass', 'specimen_int', 'specimen_int_rel', 'sample_int',
'sample_int_rel', 'site_int', 'site_int_rel', 'average_int', 'average_int_rel']
Chi_keys = ['measurement_chi_volume', 'measurement_chi_mass']
Lat_keys = ['sample_lat', 'site_lat', 'average_lat']
VLat_keys = ['vgp_lat']
VLon_keys = ['vgp_lon']
Vdm_keys = ['vdm']
Vadm_keys = ['vadm']
method_col_name = "magic_method_codes"
else:
# 3.0 keys
supported = ["specimens", "samples", "sites", "locations"] # available file types
Depth_keys = [ "height", "core_depth", "elevation", "composite_depth" ]
Age_keys = [ "age" ]
Unit_keys = { "age": "age" }
Chi_keys = [ "susc_chi_volume", "susc_chi_mass" ]
Int_keys = [ "magn_moment", "magn_volume", "magn_mass", "int_abs", "int_rel" ]
Inc_keys = [ "dir_inc" ]
Dec_keys = [ "dir_dec" ]
        Lat_keys = [ "lat" ]
VLat_keys = [ "vgp_lat", "pole_lat" ]
VLon_keys = [ "vgp_lon", "pole_lon" ]
Vdm_keys = [ "vdm", "pdm" ]
Vadm_keys = [ "vadm", "padm" ]
method_col_name = "method_codes"
#
X_keys = [Age_keys, Depth_keys]
Y_keys = [Dec_keys, Inc_keys, Int_keys, Chi_keys,
VLat_keys, VLon_keys, Vdm_keys, Vadm_keys]
method, fmt = "", 'svg'
FIG = {'strat': 1}
plotexp, pTS = 0, 0
dir_path = pmag.get_named_arg("-WD", ".")
# default files
if data_model_num == 3:
res_file = pmag.get_named_arg("-f", "sites.txt")
else:
res_file = pmag.get_named_arg("-f", "pmag_results.txt")
res_file = pmag.resolve_file_name(res_file, dir_path)
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-obj' in sys.argv:
ind = sys.argv.index('-obj')
obj = sys.argv[ind+1]
if '-x' in sys.argv:
ind = sys.argv.index('-x')
xaxis = sys.argv[ind+1]
if '-y' in sys.argv:
ind = sys.argv.index('-y')
yaxis = sys.argv[ind+1]
if yaxis == 'dec':
ykeys = Dec_keys
if yaxis == 'inc':
ykeys = Inc_keys
if yaxis == 'int':
ykeys = Int_keys
if yaxis == 'chi':
ykeys = Chi_keys
if yaxis == 'lat':
ykeys = VLat_keys
if yaxis == 'lon':
ykeys = VLon_keys
if yaxis == 'vdm':
ykeys = Vdm_keys
if yaxis == 'vadm':
ykeys = Vadm_keys
if '-mcd' in sys.argv:
ind = sys.argv.index('-mcd')
method = sys.argv[ind+1]
if '-ts' in sys.argv:
ind = sys.argv.index('-ts')
ts = sys.argv[ind+1]
amin = float(sys.argv[ind+2])
amax = float(sys.argv[ind+3])
pTS = 1
if '-Iex' in sys.argv:
plotexp = 1
if '-sav' in sys.argv:
plot = 1
#
#
# get data read in
Results, file_type = pmag.magic_read(res_file)
if file_type not in supported:
print("Unsupported file type ({}), try again".format(file_type))
sys.exit()
PltObjs = ['all']
if data_model_num == 2:
if file_type == 'pmag_results': # find out what to plot
for rec in Results:
resname = rec['pmag_result_name'].split()
if 'Sample' in resname and 'sam' not in PltObjs:
PltObjs.append('sam')
if 'Site' in resname and 'sit' not in PltObjs:
PltObjs.append('sit')
methcodes = []
# need to know all the measurement types from method_codes
if "magic_method_codes" in list(Results[0].keys()):
for rec in Results:
meths = rec["magic_method_codes"].split(":")
for meth in meths:
if meth.strip() not in methcodes and 'LP' in meth:
# look for the lab treatments
methcodes.append(meth.strip())
#
# initialize some variables
X_unit = "" # Unit for age or depth plotting (meters if depth)
Xplots, Yplots = [], []
Xunits = []
yplotind, xplotind = 0, 0
#
# step through possible plottable keys
#
if xaxis == "" or yaxis == "":
for key in list(Results[0].keys()):
for keys in X_keys:
for xkeys in keys:
if key in xkeys:
for ResRec in Results:
if ResRec[key] != "":
# only plot something if there is something to plot!
Xplots.append(key)
break
for keys in Y_keys:
for pkeys in keys:
if key in pkeys:
for ResRec in Results:
if ResRec[key] != "":
Yplots.append(key)
break
X, Y = [], []
for plt in Xplots:
if plt in Age_keys and 'age' not in X:
X.append('age')
if plt in Depth_keys and 'pos' not in X:
X.append('pos')
for plt in Yplots:
if plt in Dec_keys and 'dec' not in Y:
Y.append('dec')
if plt in Inc_keys and 'inc' not in Y:
Y.append('inc')
if plt in Int_keys and 'int' not in Y:
Y.append('int')
if plt in Chi_keys and 'chi' not in Y:
Y.append('chi')
if plt in VLat_keys and 'lat' not in Y:
Y.append('lat')
if plt in VLon_keys and 'lon' not in Y:
Y.append('lon')
if plt in Vadm_keys and 'vadm' not in Y:
Y.append('vadm')
if plt in Vdm_keys and 'vdm' not in Y:
Y.append('vdm')
if file_type == 'pmag_results':
print('available objects for plotting: ', PltObjs)
print('available X plots: ', X)
print('available Y plots: ', Y)
print('available method codes: ', methcodes)
f = open(dir_path+'/.striprc', 'w')
for x in X:
f.write('x:'+x+'\n')
for y in Y:
f.write('y:'+y+'\n')
for m in methcodes:
f.write('m:'+m+'\n')
for obj in PltObjs:
f.write('obj:'+obj+'\n')
sys.exit()
if plotexp == 1:
for lkey in Lat_keys:
for key in list(Results[0].keys()):
if key == lkey:
lat = float(Results[0][lkey])
Xinc = [pmag.pinc(lat), -pmag.pinc(lat)]
break
if Xinc == "":
print('can not plot expected inc for site - lat unknown')
if method != "" and method not in methcodes:
print('your method not available, but these are: ')
print(methcodes)
print('use ', methcodes[0], '? ^D to quit')
if xaxis == 'age':
for akey in Age_keys:
for key in list(Results[0].keys()):
if key == akey:
Xplots.append(key)
Xunits.append(Unit_keys[key])
if xaxis == 'pos':
for dkey in Depth_keys:
for key in list(Results[0].keys()):
if key == dkey:
Xplots.append(key)
if len(Xplots) == 0:
print('desired X axis information not found')
sys.exit()
if xaxis == 'age':
age_unit = Results[0][Xunits[0]]
if len(Xplots) > 1:
print('multiple X axis keys found, using: ', Xplots[xplotind])
for ykey in ykeys:
for key in list(Results[0].keys()):
if key == ykey:
Yplots.append(key)
if len(Yplots) == 0:
print('desired Y axis information not found')
sys.exit()
if len(Yplots) > 1:
print('multiple Y axis keys found, using: ', Yplots[yplotind])
# check if age or depth info
if len(Xplots) == 0:
print("Must have either age or height info to plot ")
sys.exit()
#
# check for variable to plot
#
#
# determine X axis (age or depth)
#
if xaxis == "age":
plotind = "1"
if method == "":
try:
method = methcodes[0]
except IndexError:
method = ""
if xaxis == 'pos':
xlab = "Stratigraphic Height (meters)"
else:
xlab = "Age ("+age_unit+")"
Xkey = Xplots[xplotind]
Ykey = Yplots[yplotind]
ylab = Ykey
#
# collect the data for plotting
XY = []
isign = 1.
# if float(Results[0][Xkey])/float(Results[-1][Xkey])>0 and float(Results[0][Xkey])<0:
# isign=-1. # x axis all same sign and negative, take positive (e.g.,for depth in core)
# xlab="Stratigraphic Position (meters)"
# else:
# isign=1.
for rec in Results:
if "magic_method_codes" in list(rec.keys()):
meths = rec["magic_method_codes"].split(":")
if method in meths: # make sure it is desired lab treatment step
if obj == 'all' and rec[Xkey].strip() != "":
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
elif rec[Xkey].strip() != "":
name = rec['pmag_result_name'].split()
if obj == 'sit' and "Site" in name:
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
if obj == 'sam' and "Sample" in name:
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
elif method == "":
if obj == 'all' and rec[Xkey].strip() != "":
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
elif rec[Xkey].strip() != "":
name = rec['pmag_result_name'].split()
if obj == 'sit' and "Site" in name:
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
if obj == 'sam' and "Sample" in name:
XY.append([isign*float(rec[Xkey]), float(rec[Ykey])])
else:
print("Something wrong with your plotting choices")
break
XY.sort()
title = ""
if "er_locations_names" in list(Results[0].keys()):
title = Results[0]["er_location_names"]
if "er_locations_name" in list(Results[0].keys()):
title = Results[0]["er_location_name"]
labels = [xlab, ylab, title]
pmagplotlib.plot_init(FIG['strat'], 10, 5)
pmagplotlib.plot_strat(FIG['strat'], XY, labels) # plot them
if plotexp == 1:
pmagplotlib.plot_hs(FIG['strat'], Xinc, 'b', '--')
if yaxis == 'inc' or yaxis == 'lat':
pmagplotlib.plot_hs(FIG['strat'], [0], 'b', '-')
pmagplotlib.plot_hs(FIG['strat'], [-90, 90], 'g', '-')
if pTS == 1:
FIG['ts'] = 2
pmagplotlib.plot_init(FIG['ts'], 10, 5)
pmagplotlib.plot_ts(FIG['ts'], [amin, amax], ts)
files = {}
for key in list(FIG.keys()):
files[key] = key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
files = {}
files['strat'] = xaxis+'_'+yaxis+'_.'+fmt
files['ts'] = 'ts.'+fmt
titles = {}
titles['strat'] = 'Depth/Time Series Plot'
titles['ts'] = 'Time Series Plot'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif plot == 1:
pmagplotlib.save_plots(FIG, files)
else:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit without saving: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
if __name__ == "__main__":
main()
| bsd-3-clause |
passiweinberger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forwards the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        # Cache the rasterised tex string so repeated draws of the same string
        # reuse the bitmap (pattern adapted from backend_agg.py).
        Z = self.texd.get(key)
        if Z is None:
            Z = texmanager.get_grey(s, size, self.dpi)
            Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
            self.texd[key] = Z
        gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
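# Usage sketch for print_figure (the file names and dpi are illustrative
# placeholders, and `figure` is assumed to be an existing Figure): the output
# format is inferred from the file extension, defaulting to PNG when no
# extension is given.
#
#     canvas = FigureCanvasMac(figure)
#     canvas.print_figure('output.png', dpi=150)   # bitmap via write_bitmap
#     canvas.print_figure('output.pdf')            # vector via write_pdf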
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
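# For reference, the files parsed above are expected to follow the binary
# "P6" PPM layout (dimensions below are illustrative); anything after a '#'
# on a line is treated as a comment and dropped before the header is split:
#
#     P6
#     24 24
#     255
#     <24*24*3 bytes of raw RGB data>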
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| agpl-3.0 |
lauringlab/variant_pipeline | scripts/concat_seg.py | 1 | 2479 | from Bio.Seq import Seq
from Bio import SeqIO
import argparse
import glob
import pandas as pd
import os
parser = argparse.ArgumentParser(description='This script is designed to concatenate the designated segment from a directory of sample fasta files')
parser.add_argument('dir', metavar='dir', nargs='+',
help='The directory that contains the fasta files')
parser.add_argument('seg', metavar='seg', nargs='+',
help='The segment to be taken from each file' )
parser.add_argument('key', metavar='key', nargs='+',
help='The Key to be used to add metadata to each sequence' )
parser.add_argument('out_fa', metavar='out_fa', nargs='+',
help='The output file')
args = parser.parse_args()
def ReadFASTA(fastafile):
"""Reads sequences from a FASTA file.
'fastafile' should specify the name of a FASTA file.
    This function reads all sequences from the FASTA file. It returns the
    list 'seqs', which is composed of Bio.SeqIO SeqRecord objects.
"""
seqs =[]
header = None
for seq_record in SeqIO.parse(fastafile, "fasta"):
seqs.append(seq_record)
return seqs
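# Quick sketch of ReadFASTA in isolation (the file name is a placeholder);
# each returned element is a Bio.SeqIO SeqRecord, so .id, .seq and
# .description are available as used below:
#
#     records = ReadFASTA('sample_123.fa')
#     for rec in records:
#         print rec.id, len(rec.seq)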
meta_df=pd.read_csv(args.key[0])
lauring_id=list(meta_df["Id"])
print lauring_id
selected_seg=[]
for fa in glob.glob(args.dir[0]+"/*.fa"):
seqs=ReadFASTA(fa)
for seq in seqs:
if (seq.id==args.seg[0]):
Id=os.path.basename(fa).split('.')[0]
try:
if int(Id) in lauring_id:
i=lauring_id.index(int(Id))
intervention=list(meta_df["Intervention"])[i]
if args.seg[0]=="HA":
geom=int(list(meta_df["HAI.geo"])[i])
if args.seg[0]=="NR":
geom=int(list(meta_df["NAI.geo"])[i])
date=list(meta_df["collection_date"])[i]
print Id
else:
print "didn't find" + Id
intervention="NA"
geom="NA"
date="NA"
except ValueError:
print "value error for " + Id
intervention="NA"
geom="NA"
date="NA"
seq.id=str(Id)+"_"+str(intervention)+"_"+str(geom)+"_"+str(date)
seq.description=""
selected_seg.append(seq)
SeqIO.write(selected_seg, args.out_fa[0], "fasta")
| apache-2.0 |
hanteng/country-groups | scripts/_construct_data_ASEM.py | 1 | 4193 | # -*- coding: utf-8 -*-
# Prejudice knows no bounds; turning back is the shore. Keystrokes rise and fall; feelings real and illusory.
# Correction: 0->ASEAN, 49-> GB, 52 ->KR
import os.path, glob
import requests
from lxml.html import fromstring, tostring, parse
from io import StringIO, BytesIO
import codecs
import pandas as pd
import json
XML_encoding="utf-8"
# Data source
URL_ = "http://www.aseminfoboard.org/members"
URL_country_names_template = "https://raw.githubusercontent.com/hanteng/country-names/master/data/CLDR_country_name_{locale}.tsv"
URL_country_names = URL_country_names_template.format(locale= 'en')
# Xpath extraction
_xpath='//*[@id="block-views-members-block"]/div/div/div[2]/table/tbody/tr/td/div[2]/span/a/text()'
## Output Lists
PE = 'ASEM'
path_data = u'../data'
outputfn1 = os.path.join(path_data, "PE_org.json")
outputfn2 = os.path.join(path_data, "CLDR_UN_region.tsv")
def url_request (url):
r = requests.get(url)
if r.status_code == 200:
#r.raw.decode_content = True
return r
else:
print ("Downloading the data from {0} failed. Plese check Internet connections.".format(XML_src_url))
return None
def url_local_request (url):
fn_local = os.path.join(path_data, PE+ ".htm")
print (fn_local) #debug
try:
tree = parse(fn_local)
except:
r = url_request (url)
XML_src=r.content
with codecs.open(fn_local, "w", XML_encoding) as file:
file.write(XML_src.decode(XML_encoding))
#from lxml.html.clean import clean_html
#XML_src = clean_html(XML_src)
tree = fromstring(XML_src)
return tree
t = url_local_request(URL_)
list_country_names_Web = t.xpath(_xpath)
print (list_country_names_Web)
## Retrive data directly from unicode-cldr project hosted at github
print ("Retrieve country names data now ...")
locale = "en"
url = URL_country_names_template.format(locale=locale)
df_results = pd.read_csv(url, sep='\t', encoding='utf-8',
na_values=[], keep_default_na = False,
names = ['c','n'] , index_col='c',
)
## Construct dictionary for country/region names
c_names = df_results.to_dict()['n'] #http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_dict.html
c_names_inv = {v: k for k, v in c_names.items()}
## Country names fuzzy match
from fuzzywuzzy import process
choice=[]
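# fuzzywuzzy's process.extract(query, choices, limit=1) returns a list of
# (candidate, score) tuples, e.g. roughly [('Viet Nam', 95)] for the query
# 'Vietnam' (the score shown is illustrative), so found_candidate[0][0]
# below is the best-matching CLDR country name.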
for i, c_name_Web in enumerate(list_country_names_Web):
#found_candidates = [x for x in c_names_inv.keys() if fuzzy_match(x,c_name_Web)==True]
found_candidate = process.extract(c_name_Web, c_names_inv.keys(), limit=1)
found_candidate_c = c_names_inv[found_candidate[0][0]]
choice_item = [i, c_name_Web, found_candidate, found_candidate_c]
#print (choice_item)
choice.append(choice_item)
import ast
done = False
while not(done):
try:
# Note: Python 2.x users should use raw_input, the equivalent of 3.x's input
prn= [repr(x) for x in choice]
print ("\n\r".join(prn))
i = int(input("Please enter your corrections: Serial no (-1:None): "))
if i==-1:
print ("Done!")
            done = True
break
else:
if i in range(len(choice)):
c = input("Please enter your corrections: Country code (ISO-alpha2): ")
choice[i][3] = c
else:
print("Sorry, Please revise your input.")
except ValueError:
print("Sorry, I didn't understand that.")
#better try again... Return to the start of the loop
continue
list_country_codes_Web = [x[3] for x in choice]
print (list_country_codes_Web)
print (list_country_names_Web)
print ("==========")
PE_org = dict()
with codecs.open(outputfn1, encoding='utf-8', mode='r+') as fp:
lines=fp.readlines()
PE_org = json.loads(u"".join(lines))
print (PE_org)
d={PE: list_country_codes_Web}
print(d)
PE_org.update(d)
print (PE_org)
with codecs.open(outputfn1, encoding='utf-8', mode='w') as fp:
json.dump(PE_org, fp)
| gpl-3.0 |
xuewei4d/scikit-learn | examples/classification/plot_lda_qda.py | 25 | 5449 | """
====================================================================
Linear and Quadratic Discriminant Analysis with covariance ellipsoid
====================================================================
This example plots the covariance ellipsoids of each class and
decision boundary learned by LDA and QDA. The ellipsoids display
the double standard deviation for each class. With LDA, the
standard deviation is the same for all the classes, while each
class has its own standard deviation with QDA.
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# #############################################################################
# Colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
# #############################################################################
# Generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
# #############################################################################
# Plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with\n fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with\n varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
# class 0: dots
plt.scatter(X0_tp[:, 0], X0_tp[:, 1], marker='.', color='red')
plt.scatter(X0_fp[:, 0], X0_fp[:, 1], marker='x',
s=20, color='#990000') # dark red
# class 1: dots
plt.scatter(X1_tp[:, 0], X1_tp[:, 1], marker='.', color='blue')
plt.scatter(X1_fp[:, 0], X1_fp[:, 1], marker='x',
s=20, color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.), zorder=0)
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='white')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
plt.plot(lda.means_[1][0], lda.means_[1][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color,
edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariance_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariance_[1], 'blue')
plt.figure(figsize=(10, 8), facecolor='white')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis',
y=0.98, fontsize=15)
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariance=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.tight_layout()
plt.subplots_adjust(top=0.92)
plt.show()
| bsd-3-clause |
tmhm/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
drichards2/strikex-processing-tools | python/utilities/lowndes-converter/lowndes/lowndes.py | 1 | 4763 | """Tools to read and write Lowndes files"""
import os.path
import pandas
BELL_IDENTIFIERS = ( '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O', 'E', 'T', 'A', 'B', 'C', 'D' )
def read( infile ):
"""Read a file in Lowndes format.
Args:
infile (str): Filename of the Lowndes file to read
Returns:
Dictionary with two keys - one key 'info' containing metadata from the file and another
'strikedata' containing a pandas DataFrame with the strike information"""
strike_count = 0
base_offset = 0
last_timestamp = 0
metadata = {}
metadata['basename'] = os.path.split(infile)[1]
strikedata = { 'handstroke': [], 'bell': [], 'time': [] }
line_count = 1
with open( infile, 'rt') as istrm:
for line in istrm:
line = line.strip()
line_count += 1
if '#.' in line:
if 'Lowndes:' in line:
metadata['version'] = line[12:]
elif 'Creator:' in line:
metadata['creator'] = line[12:]
elif 'TranscriptionDate:' in line:
metadata['transcription_date'] = line[22:]
elif 'FirstBlowMs:' in line:
metadata['first_blow'] = int(line[16:])
else:
blowdata = line.split()
if len(blowdata) == 3:
handstroke = blowdata[0] == 'H'
if blowdata[1] not in BELL_IDENTIFIERS:
print 'Unrecognised bell identifier {0} at line {1}'.format( blowdata[1], line_count )
continue
bell = BELL_IDENTIFIERS.index( blowdata[1] ) + 1
timestamp = int( blowdata[2], 16)
if (timestamp < last_timestamp):
base_offset += 65536
strike_time = timestamp + base_offset
strike_count += 1
last_timestamp = timestamp
strikedata['handstroke'].append( handstroke )
strikedata['bell'].append( bell )
strikedata['time'].append( strike_time )
pandas_frame = pandas.DataFrame( data=strikedata )
if 'bells' not in metadata:
metadata['bells'] = pandas_frame['bell'].max()
return { 'info': metadata, 'strikedata': pandas_frame }
def __bell_to_symbol( bell_number ):
"""Turn a bell number into a alphanumeric symbol representing the bell
Args:
bell_number (int): Bell number to convert
Returns:
Bell symbol as string"""
if (bell_number <= 0) or (bell_number > len( BELL_IDENTIFIERS ) ):
        raise ValueError('{0} is not a valid bell number'.format(bell_number))
return BELL_IDENTIFIERS[bell_number-1]
def write( outfile, lowndes_data ):
"""Write a file in Lowndes format.
Args:
        outfile (str): Filename of the Lowndes file to write
lowndes_data:
        lowndes_data can be in several different formats. The preferred format
        is a dictionary with one key 'info' containing metadata from the file and another
'strikedata' containing a pandas DataFrame with the strike information"""
with open(outfile, 'wt') as ostrm:
if type(lowndes_data)==dict:
if 'info' in lowndes_data:
if 'version' in lowndes_data['info']:
print >> ostrm, '#. Lowndes: {0}'.format( lowndes_data['info']['version'] )
if 'creator' in lowndes_data['info']:
print >> ostrm, '#. Creator: {0}'.format( lowndes_data['info']['creator'] )
if 'transcription_date' in lowndes_data['info']:
print >> ostrm, '#. TranscriptionDate: {0}'.format( lowndes_data['info']['transcription_date'] )
if 'first_blow' in lowndes_data['info']:
print >> ostrm, '#. FirstBlowMs: {0}'.format( lowndes_data['info']['first_blow'] )
if 'strikedata' not in lowndes_data:
raise Exception('No strikedata found in lowndes data')
if type(lowndes_data['strikedata'])==pandas.DataFrame:
for strike in lowndes_data['strikedata'].iterrows():
panda_frame = strike[1]
print >> ostrm, '{0} {1} 0X{2:04x}'.format( 'H' if panda_frame['handstroke'] else 'B', __bell_to_symbol( panda_frame['bell'] ), panda_frame['time'] % 0x10000)
else:
raise Exception('Unrecognised format for strike information in dictionary')
else:
raise Exception('Unrecognised format for Lowndes data')
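# Round-trip sketch for this module (file names are placeholders):
#
#     data = read('touch.lowndes')
#     print data['info'].get('creator')
#     print data['strikedata'].head()   # 'bell', 'handstroke' and 'time' columns
#     write('touch_copy.lowndes', data)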
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/tests/test_contour.py | 6 | 8038 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import datetime
import numpy as np
from matplotlib import mlab
from matplotlib.testing.decorators import cleanup, image_comparison
from matplotlib import pyplot as plt
from nose.tools import assert_equal, assert_raises
import warnings
import re
@cleanup
def test_contour_shape_1d_valid():
x = np.arange(10)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(x, y, z)
@cleanup
def test_contour_shape_2d_valid():
x = np.arange(10)
y = np.arange(9)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(xg, yg, z)
@cleanup
def test_contour_shape_mismatch_1():
x = np.arange(9)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of x must be number of columns in z.'
@cleanup
def test_contour_shape_mismatch_2():
x = np.arange(10)
y = np.arange(10)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of y must be number of rows in z.'
@cleanup
def test_contour_shape_mismatch_3():
x = np.arange(10)
y = np.arange(10)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(xg, y, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
try:
ax.contour(x, yg, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
@cleanup
def test_contour_shape_mismatch_4():
g = np.random.random((9, 10))
b = np.random.random((9, 9))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(b, g, z)
except TypeError as exc:
print(exc.args[0])
assert re.match(
r'Shape of x does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
try:
ax.contour(g, b, z)
except TypeError as exc:
assert re.match(
r'Shape of y does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
@cleanup
def test_contour_shape_invalid_1():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Inputs x and y must be 1D or 2D.'
@cleanup
def test_contour_shape_invalid_2():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((3, 3, 3))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Input z must be a 2D array.'
@image_comparison(baseline_images=['contour_manual_labels'])
def test_contour_manual_labels():
x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10))
z = np.max(np.dstack([abs(x), abs(y)]), 2)
plt.figure(figsize=(6, 2))
cs = plt.contour(x, y, z)
pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)])
plt.clabel(cs, manual=pts)
@image_comparison(baseline_images=['contour_labels_size_color'],
extensions=['png'], remove_text=True)
def test_contour_labels_size_color():
x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10))
z = np.max(np.dstack([abs(x), abs(y)]), 2)
plt.figure(figsize=(6, 2))
cs = plt.contour(x, y, z)
pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)])
plt.clabel(cs, manual=pts, fontsize='small', colors=('r', 'g'))
@image_comparison(baseline_images=['contour_manual_colors_and_levels'],
extensions=['png'], remove_text=True)
def test_given_colors_levels_and_extends():
_, axes = plt.subplots(2, 4)
data = np.arange(12).reshape(3, 4)
colors = ['red', 'yellow', 'pink', 'blue', 'black']
levels = [2, 4, 8, 10]
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
filled = i % 2 == 0.
extend = ['neither', 'min', 'max', 'both'][i // 2]
if filled:
last_color = -1 if extend in ['min', 'max'] else None
plt.contourf(data, colors=colors[:last_color], levels=levels,
extend=extend)
else:
last_level = -1 if extend == 'both' else None
plt.contour(data, colors=colors, levels=levels[:last_level],
extend=extend)
plt.colorbar()
@image_comparison(baseline_images=['contour_datetime_axis'],
extensions=['png'], remove_text=False)
def test_contour_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(20)])
y = np.arange(20)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.contour(x, y, z)
plt.subplot(222)
plt.contourf(x, y, z)
x = np.repeat(x[np.newaxis], 20, axis=0)
y = np.repeat(y[:, np.newaxis], 20, axis=1)
plt.subplot(223)
plt.contour(x, y, z)
plt.subplot(224)
plt.contourf(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
@image_comparison(baseline_images=['contour_test_label_transforms'],
extensions=['png'], remove_text=True)
def test_labels():
# Adapted from pylab_examples example code: contour_demo.py
# see issues #2475, #2843, and #2818 for explanation
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
fig, ax = plt.subplots(1, 1)
CS = ax.contour(X, Y, Z)
disp_units = [(216, 177), (359, 290), (521, 406)]
data_units = [(-2, .5), (0, -1.5), (2.8, 1)]
CS.clabel()
for x, y in data_units:
CS.add_label_near(x, y, inline=True, transform=None)
for x, y in disp_units:
CS.add_label_near(x, y, inline=True, transform=False)
@image_comparison(baseline_images=['contour_corner_mask_False',
'contour_corner_mask_True'],
extensions=['png'], remove_text=True)
def test_corner_mask():
n = 60
mask_level = 0.95
noise_amp = 1.0
np.random.seed([1])
x, y = np.meshgrid(np.linspace(0, 2.0, n), np.linspace(0, 2.0, n))
z = np.cos(7*x)*np.sin(8*y) + noise_amp*np.random.rand(n, n)
mask = np.where(np.random.rand(n, n) >= mask_level, True, False)
z = np.ma.array(z, mask=mask)
for corner_mask in [False, True]:
fig = plt.figure()
plt.contourf(z, corner_mask=corner_mask)
@cleanup
def test_contourf_decreasing_levels():
# github issue 5477.
z = [[0.1, 0.3], [0.5, 0.7]]
plt.figure()
assert_raises(ValueError, plt.contourf, z, [1.0, 0.0])
# Legacy contouring algorithm gives a warning rather than raising an error,
# plus a DeprecationWarning.
with warnings.catch_warnings(record=True) as w:
plt.contourf(z, [1.0, 0.0], corner_mask='legacy')
assert_equal(len(w), 2)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
RangerKD/rpg_svo | svo_analysis/scripts/comparison.py | 17 | 7184 | #!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
import svo_analysis.analyse_depth as analyse_depth
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
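# plot_trajectory() below expects a plain-text trajectory file whose
# non-comment lines hold whitespace- or comma-separated numbers; columns 1
# and 2 (presumably x and y in metres, after a leading timestamp column)
# are the ones plotted.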
def plot_trajectory(ax, filename, label, color, linewidth):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def save_figure(fig, name, directory):
fig.tight_layout()
fig.savefig(os.path.join(directory, name+'.pdf'), bbox_inches="tight")
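# NOTE: distances_along_trajectory() references ominus() and
# compute_distance(), which are neither defined nor imported in this module;
# they appear to be the SE(3) helpers from the TUM RGB-D benchmark evaluation
# scripts and would need to be provided for this function to run.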
def distances_along_trajectory(traj):
keys = traj.keys()
keys.sort()
motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
distances = [0]
sum = 0
for t in motion:
sum += compute_distance(t)
distances.append(sum)
return distances
def get_distance_from_start(gt):
distances = np.diff(gt[:,1:4],axis=0)
distances = np.sqrt(np.sum(np.multiply(distances,distances),1))
distances = np.cumsum(distances)
distances = np.concatenate(([0], distances))
return distances
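# Tiny worked example for get_distance_from_start(): if the position columns
# (1:4) of the ground-truth rows are (0,0,0), (1,0,0) and (1,1,0), the
# per-step Euclidean distances are 1 and 1, so the returned cumulative
# distances are [0, 1, 2].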
def compare_results(comp_params, results_dir, comparison_dir):
print('run comparison: '+comp_params['comparison_name'])
line_styles = ['-','--',':']
line_colors = ['b','g','r','m','c']
# -------------------------------------------------------------------------
# plot trajectory:
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='x [m]', ylabel='y [m]')
for exp_set in comp_params['experiment_sets']:
print('processing experiment set: ' + exp_set['label'])
for i, exp in enumerate(exp_set['experiments']):
data = np.loadtxt(os.path.join(results_dir, exp, 'traj_estimate.txt'))
if i == 0:
base_plot, = ax.plot(data[:,1], data[:,2], label=exp_set['label'], linestyle=line_styles[np.mod(i, len(line_styles))])
else:
ax.plot(data[:,1], data[:,2], color=base_plot.get_color(), linestyle= line_styles[np.mod(i, len(line_styles))])
ax.legend(loc='upper left', ncol=3)
save_figure(fig, 'trajectory', comparison_dir)
# -------------------------------------------------------------------------
# plot translation error:
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='distance [m]', ylabel='translation drift [mm]')
for exp_set in comp_params['experiment_sets']:
print('processing experiment set: ' + exp_set['label'])
for i, exp in enumerate(exp_set['experiments']):
gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
distances = get_distance_from_start(gt)
data = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
e = np.sqrt(np.sum(np.multiply(data[:,1:4],data[:,1:4]),1))
if np.shape(e)[0] > np.shape(distances)[0]:
                print('WARNING: estimate has more measurements than groundtruth: '
+str(np.shape(e)[0]-np.shape(distances)[0]))
e = e[0:np.shape(distances)[0]]
distances = distances[0:np.shape(e)[0]]
print '--'
print np.shape(e)
print np.shape(distances)
if i == 0:
base_plot, = ax.plot(distances, e*1000, label=exp_set['label'], linestyle= line_styles[np.mod(i, len(line_styles))])
else:
ax.plot(distances, e*1000, color=base_plot.get_color(), linestyle= line_styles[np.mod(i, len(line_styles))])
ax.legend(loc='upper left', ncol=3)
save_figure(fig, 'translation_error', comparison_dir)
# -------------------------------------------------------------------------
# plot depth estimation error:
fig = plt.figure(figsize=(6,5))
ax = fig.add_subplot(111, xlabel='Travelled distance [m]', ylabel='Error [m]')
for k, exp_set in enumerate(comp_params['experiment_sets']):
print('plot depth error for experiment: ' + exp)
exp = exp_set['experiments'][0]
gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
x_axis_data = get_distance_from_start(gt)
analyse_depth.plot_depth_over_time(os.path.join(results_dir, exp), ax,
x_axis_data[1:], line_colors[k], exp_set['label'])
ax.legend(loc='upper left', ncol=3)
save_figure(fig, 'depth_error_textures', comparison_dir)
fig = plt.figure(figsize=(6,5))
ax = fig.add_subplot(111, xlabel='Travelled distance [m]', ylabel='Error [m]')
exp_set = comp_params['experiment_sets'][0]
for i, exp in enumerate(exp_set['experiments']):
print('plot depth error for speed: ' + exp)
gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
x_axis_data = get_distance_from_start(gt)
params = yaml.load(open(os.path.join(results_dir, exp, 'dataset_params.yaml')))
analyse_depth.plot_depth_over_time(os.path.join(results_dir, exp), ax,
x_axis_data[1:], line_colors[i],
str(params['trajectory_modifiers']['speed'])+' m/s')
ax.legend(loc='upper left', ncol=3)
save_figure(fig, 'depth_error_speed', comparison_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare results of a VO pipeline.')
parser.add_argument('comparison_file', help='A YAML file that contains the details of the comparison')
args = parser.parse_args()
# load comparison file
args.experiment_file = args.comparison_file.replace('.yaml','')
comparison_params_file = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
'comparisons', args.comparison_file+'.yaml')
if os.path.exists(comparison_params_file):
comp_params = yaml.load(open(comparison_params_file, 'r'))
else:
raise Exception("Provided comparison file does not exist.")
# create folder for comparison results
comparison_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
'comparisons', comp_params['comparison_name'])
if not os.path.exists(comparison_dir):
os.makedirs(comparison_dir)
# folder where the results of previous experiments are saved
results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
# run comparison
compare_results(comp_params, results_dir, comparison_dir)
| gpl-3.0 |