repo_name | path | copies | size | content | license
---|---|---|---|---|---|
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/backends/backend_wx.py | 6 | 64967 | """
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue ([email protected])
Derived from original copyright work by John Hunter
([email protected])
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
import sys
import os
import os.path
import math
import weakref
import warnings
import numpy as np
import matplotlib
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
cursors, TimerBase)
from matplotlib.backend_bases import ShowBase
from matplotlib.backend_bases import _has_pil
from matplotlib._pylab_helpers import Gcf
from matplotlib.cbook import (is_string_like, is_writable_file_like,
warn_deprecated)
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
from . import wx_compat as wxc
import wx
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback
import pdb
_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls))
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print()
pdb.pm() # jdh uncomment
class fake_stderr(object):
"""
Wx does strange things with stderr, as it makes the assumption that
there is probably no console. This redirects stderr to the console, since
we know that there is one!
"""
def write(self, msg):
print("Stderr: %s\n\r" % msg)
#if _DEBUG < 5:
#sys.excepthook = debug_on_error
#WxLogger =wx.LogStderr()
#sys.stderr = fake_stderr
# the true dots per inch on the screen; should be display dependent
# see
# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
# for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, pop up an error dialog
"""
dialog = wx.MessageDialog(parent=parent,
message=msg,
caption='Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class TimerWx(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timer event to our handler.
# For WX, the events have to use a widget for binding.
self.parent = parent
self._timer = wx.Timer(self.parent, wx.NewId())
self.parent.Bind(wx.EVT_TIMER, self._on_timer, self._timer)
# Unbinding causes Wx to stop for some reason. Disabling for now.
# def __del__(self):
# TimerBase.__del__(self)
# self.parent.Bind(wx.EVT_TIMER, None, self._timer)
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
self._timer_start()
def _timer_set_single_shot(self):
self._timer.Start()
def _on_timer(self, *args):
TimerBase._on_timer(self)
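# Illustrative sketch, not part of the original backend: how a TimerWx created
# through the canvas API below would typically be driven. The 500 ms interval and
# the choice of draw_idle as the callback are assumptions for the example only.
def _example_timer_usage(canvas):
    # Create a repeating timer bound to the canvas and redraw on every tick.
    timer = canvas.new_timer(interval=500)
    timer.add_callback(canvas.draw_idle)
    timer.start()
    return timer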
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
# In wxPython, drawing is performed on a wxDC instance, which will
# generally be mapped to the client area of the window displaying
# the plot. Under wxPython, the wxDC instance has a wx.Pen which
# describes the colour and weight of any lines drawn, and a wxBrush
# which describes the fill colour of any closed polygon.
fontweights = wxc.fontweights
fontangles = wxc.fontangles
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
# QUESTION: Is it wise to agree on standard fontnames across all backends?
fontnames = wxc.fontnames
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
warn_deprecated('2.0', message="The WX backend is "
"deprecated. It's untested "
"and will be removed in Matplotlib 2.2. "
"Use the WXAgg backend instead. "
"See Matplotlib usage FAQ for more info on backends.",
alternative='WXAgg')
RendererBase.__init__(self)
DEBUG_MSG("__init__()", 1, self)
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperties prop
"""
# return 1, 1
if ismath:
s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0],
self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
@staticmethod
def convert_path(gfx_ctx, path, transform):
wxpath = gfx_ctx.CreatePath()
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
wxpath = self.convert_path(gfx_ctx, path, transform)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.width
h = self.height
rows, cols = im.shape[:2]
bitmap = wxc.BitmapFromBuffer(cols, rows, im.tostring())
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
int(w), int(-h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y - h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
Return an instance of a GraphicsContextWx, and set it as the current gc
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size + 0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
convert point measures to pixels using dpi and the pixels per
inch of the display
"""
return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
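# Worked example of the conversion above (illustrative only): with the module
# default PIXELS_PER_INCH = 75 and a renderer dpi of 72, a 12-point measure maps
# to 12 * (75 / 72.0 * 72 / 72.0) = 12.5 pixels.
def _example_points_to_pixels(points=12.0, dpi=72.0):
    # Mirrors RendererWx.points_to_pixels so the arithmetic can be checked
    # without constructing a renderer; returns 12.5 for the defaults.
    return points * (PIXELS_PER_INCH / 72.0 * dpi / 72.0)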
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = {'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND}
_joind = {'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND}
_dashd_wx = wxc.dashd_wx
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
Select a null bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGBA=None):
"""
Set the foreground color. fg can be a MATLAB format string, an
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
Set the foreground color to a grayscale value. frac must be a
float between 0 and 1, where 0 is black and 1 is white.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
w = float(w)
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w > 0 and w < 1:
w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw == 0:
lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to be one of the styles in
GraphicsContextWx._dashd_wx; unknown styles fall back to a long dash.
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
self._style = wx.LONG_DASH # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(
red=int(r),
green=int(g),
blue=int(b),
alpha=int(a))
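# Illustrative sketch, not in the original module: the same unit-interval to
# 0-255 conversion performed by get_wxcolour above, written as a standalone
# helper so the arithmetic can be checked without creating a wx.Colour.
def _example_rgb_to_255(color=(0.5, 0.0, 1.0)):
    # int() truncates, matching the code above: (0.5, 0.0, 1.0) -> (127, 0, 255).
    return tuple(int(c * 255) for c in color)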
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureCanvasWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l, b, w, h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn(
"could not find a setinitialsize function for backend_wx; "
"please report your wxpython version=%s "
"to the matplotlib developers list" %
wxc.backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize',
do_nothing)
if not hasattr(self, 'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible',
lambda *args: True)
# Create the drawing bitmap
self.bitmap = wxc.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
self.Bind(wx.EVT_SIZE, self._onSize)
self.Bind(wx.EVT_PAINT, self._onPaint)
self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
self.Bind(wx.EVT_MOTION, self._onMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
self.Bind(wx.EVT_IDLE, self._onIdle)
# Add middle button events
self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
if wx.VERSION_STRING < "2.9":
# only needed in 2.8 to reduce flicker
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
else:
# this does the same in 2.9+
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
self.macros = {} # dict from wx id to seq of macros
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only
for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerWx(self, *args, **kwargs)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout * 1000, oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wxc.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
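# Example of the strings built above (illustrative): for PNG the per-type entry is
# 'Portable Network Graphics (*.png)|*.png', and the entries for all supported
# file types are joined with '|' into the single wildcard string wx.FileDialog
# expects.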
def gui_repaint(self, drawDC=None, origin='WX'):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied wx.PaintDC device context.
The 'WXAgg' backend sets origin accordingly.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if not drawDC:
# not called from OnPaint; use a ClientDC
drawDC = wx.ClientDC(self)
# following is for 'WX' backend on Windows
# the bitmap can not be in use by another DC,
# see GraphicsContextWx._cache
if wx.Platform == '__WXMSW__' and origin == 'WX':
img = self.bitmap.ConvertToImage()
bmp = img.ConvertToBitmap()
drawDC.DrawBitmap(bmp, 0, 0)
else:
drawDC.DrawBitmap(self.bitmap, 0, 0)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains() methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
if not _has_pil:
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
*args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
if not _has_pil:
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF,
*args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l, b, width, height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wxc.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# image is the object that we call SaveFile on.
image = self.bitmap
# set the JPEG quality appropriately. Unfortunately, it is only
# possible to set the quality on a wx.Image object. So if we
# are saving a JPEG, convert the wx.Bitmap to a wx.Image,
# and set the quality.
if filetype == wx.BITMAP_TYPE_JPEG:
jpeg_quality = kwargs.get('quality',
rcParams['savefig.jpeg_quality'])
image = self.bitmap.ConvertToImage()
image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not image.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
elif is_writable_file_like(filename):
if not isinstance(image, wx.Image):
image = image.ConvertToImage()
if not image.SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap = wxc.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
FigureCanvasBase.resize_event(self)
def _get_key(self, evt):
keyval = evt.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
# wx always returns an uppercase character, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not evt.ShiftDown():
key = key.lower()
else:
key = None
for meth, prefix in (
[evt.AltDown, 'alt'],
[evt.ControlDown, 'ctrl'], ):
if meth():
key = '{0}+{1}'.format(prefix, key)
return key
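# Examples of the strings produced above (illustrative): pressing Ctrl+S yields
# 'ctrl+s', Ctrl+Shift+S yields 'ctrl+S' (shift keeps the uppercase character),
# and Alt+F1 yields 'alt+f1' via the keyvald lookup table.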
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
# print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3,
dblclick=True, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1,
dblclick=True, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
# Add middle button events
def _onMiddleButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
def _onMiddleButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 2,
dblclick=True, guiEvent=evt)
def _onMiddleButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
# print "delta,rotation,rate",delta,rotation,rate
step = rate * float(rotation) / delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
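# Worked example of the step calculation above (illustrative): a standard wheel
# click reports delta=120, so with rotation=-240 (two clicks towards the user)
# and rate=3 lines per action, step = 3 * (-240) / 120 = -6.0.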
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent=evt)
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
Creates a wx.App instance if one has not been created so far.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
needmain = not wx.App.IsMainLoopRunning()
if needmain:
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
fig = figure
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos = wx.Point(20, 20)
l, b, w, h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.canvas.SetFocus()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
if wxc.is_phoenix:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.canvas.SetMinSize((2, 2))
# give the window a matplotlib icon rather than the stock one.
# This is not currently working on Linux and is untested elsewhere.
# icon_path = os.path.join(matplotlib.rcParams['datapath'],
# 'images', 'matplotlib.png')
#icon = wx.IconFromBitmap(wx.Bitmap(icon_path))
# for xpm type icons try:
#icon = wx.Icon(icon_path, wx.BITMAP_TYPE_XPM)
# self.SetIcon(icon)
self.figmgr = FigureManagerWx(self.canvas, num, self)
self.Bind(wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.close_event()
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
# self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
try:
self.canvas.mpl_disconnect(self.toolbar._idDrag)
# Rationale for line above: see issue 2941338.
except AttributeError:
pass # classic toolbar lacks the attribute
if not self.IsBeingDeleted():
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb is not None:
self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def show(self):
self.frame.Show()
self.canvas.draw()
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
def get_window_title(self):
return self.window.GetTitle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU = wx.NewId()
_NTB_AXISMENU_BUTTON = wx.NewId()
_NTB_X_PAN_LEFT = wx.NewId()
_NTB_X_PAN_RIGHT = wx.NewId()
_NTB_X_ZOOMIN = wx.NewId()
_NTB_X_ZOOMOUT = wx.NewId()
_NTB_Y_PAN_UP = wx.NewId()
_NTB_Y_PAN_DOWN = wx.NewId()
_NTB_Y_ZOOMIN = wx.NewId()
_NTB_Y_ZOOMOUT = wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE = wx.NewId()
_NTB_CLOSE = wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'], 'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
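# Illustrative usage (the filename is an example of a toolbar icon shipped with
# matplotlib): _load_bitmap('home.png') returns the navigation toolbar's home
# icon as a wx.Bitmap, and an IOError is raised if the file is missing.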
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu = wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId = wx.NewId()
self._invertId = wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected",
False)
self._menu.AppendSeparator()
self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
id=self._invertId)
def Destroy(self):
self._menu.Destroy()
self.Destroy()
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
if wxc.is_phoenix:
x, y = self.GetPosition()
w, h = self.GetSize()
else:
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y + h - 4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId = wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i,
"Select axis %d" % i,
True)
self._menu.Check(menuId, True)
self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
elif maxAxis < len(self._axisId):
for menuId in self._axisId[maxAxis:]:
self._menu.Delete(menuId)
self._axisId = self._axisId[:maxAxis]
self._toolbar.set_active(list(xrange(maxAxis)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e + 1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
self.prevZoomRect = None
# for now, use alternate zoom-rectangle drawing on all
# Macs. N.B. In future versions of wx it may be possible to
# detect Retina displays with window.GetContentScaleFactor()
# and/or dc.GetContentScaleFactor()
self.retinaFix = 'wxMac' in wx.PlatformInfo
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = wx.NewId()
wxc._AddTool(self, self.wx_ids, text,
_load_bitmap(image_file + '.png'),
tooltip_text)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self.Realize()
def zoom(self, *args):
self.ToggleTool(self.wx_ids['Pan'], False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self.wx_ids['Zoom'], False)
NavigationToolbar2.pan(self, *args)
def configure_subplots(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save_figure(self, *args):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = self.canvas.get_default_filename()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG(
'Save file dir:%s name:%s' %
(dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
# looks like they forgot to set the image type drop
# down, going with the extension.
warnings.warn(
'extension %s did not match the selected '
'image type %s; going with %s' %
(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception as e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor = wxc.Cursor(cursord[cursor])
self.canvas.SetCursor(cursor)
def release(self, event):
try:
del self.lastrect
except AttributeError:
pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
if not self.retinaFix:
self.wxoverlay = wx.Overlay()
else:
self.savedRetinaImage = self.canvas.copy_from_bbox(
self.canvas.figure.gca().bbox)
self.zoomStartX = event.xdata
self.zoomStartY = event.ydata
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
if not self.retinaFix:
self.wxoverlay.Reset()
del self.wxoverlay
else:
del self.savedRetinaImage
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.prevZoomRect = None
def draw_rubberband(self, event, x0, y0, x1, y1):
if self.retinaFix: # On Macs, use the following code
# wx.DCOverlay does not work properly on Retina displays.
rubberBandColor = '#C0C0FF'
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.canvas.restore_region(self.savedRetinaImage)
X0, X1 = self.zoomStartX, event.xdata
Y0, Y1 = self.zoomStartY, event.ydata
lineX = (X0, X0, X1, X1, X0)
lineY = (Y0, Y1, Y1, Y0, Y0)
self.prevZoomRect = self.canvas.figure.gca().plot(
lineX, lineY, '-', color=rubberBandColor)
self.canvas.figure.gca().draw_artist(self.prevZoomRect[0])
self.canvas.blit(self.canvas.figure.gca().bbox)
return
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wxc.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
if wxc.is_phoenix:
dc.DrawRectangle(rect)
else:
dc.DrawRectangleRect(rect)
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None:
self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self.wx_ids['Back'], can_backward)
self.EnableTool(self.wx_ids['Forward'], can_forward)
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
# self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
# def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""
Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
wx.Printout.__init__(self, title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
# currently only supports a 1-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw, pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw, pgh) = self.GetPageSizePixels() # page size in pixels
(dcw, dch) = dc.GetSize()
if wxc.is_phoenix:
(grw, grh) = self.canvas.GetSize()
else:
(grw, grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution, bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(
int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(
int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
# get margin in pixels = (margin in inches) * (pixels/inch)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
# (grw is the canvas width in pixels)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
# this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasWx
FigureManager = FigureManagerWx
Toolbar = NavigationToolbar2Wx
| apache-2.0 |
dkillick/iris | lib/iris/tests/unit/quickplot/test_contourf.py | 11 | 2425 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.quickplot.contourf` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests import mock
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
qplt.contourf(self.cube, coords=('bar', 'str_coord'))
self.assertPointsTickLabels('yaxis')
def test_xaxis_labels(self):
qplt.contourf(self.cube, coords=('str_coord', 'bar'))
self.assertPointsTickLabels('xaxis')
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
mocker = mock.Mock(alpha=0, antialiased=False)
self.mpl_patch = self.patch('matplotlib.pyplot.contourf',
return_value=mocker)
# Also need to mock the colorbar.
self.patch('matplotlib.pyplot.colorbar')
self.draw_func = qplt.contourf
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
imk1/IMKTFBindingCode | associateRegionsToAcitveInactiveOld.py | 1 | 11385 | import sys
import pybedtools as bt
import os
from deeplearning.mergedPeaksSummitAndMakeContinuousMat import show_value
import numpy as np
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import linkage, cut_tree, leaves_list
import random
from scipy.spatial.distance import pdist
from polo import optimal_leaf_ordering
from igraph.drawing.colors import RainbowPalette
from seaborn import heatmap
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def getActivity(chromatinState):
# Determine if a chromatin state is active or inactive
activeStates = ["E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "E10", "E11", "E14", "E15"]
inactiveStates = ["E12", "E13", "E16", "E17", "E18"]
if chromatinState in activeStates:
# The chromatin state is active
return 1
return 0
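# Examples of the mapping above (illustrative): getActivity("E3") returns 1 and
# getActivity("E13") returns 0; any state label outside the active list,
# including unrecognised ones, falls through to 0.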
def getNextRegionAndActivity(clusterFile):
# Gets the next region and activity in a cluster file
currentClusterLine = clusterFile.readline()
if currentClusterLine != "":
# Not at the end of the file
currentClusterLineElements = currentClusterLine.strip().split("\t")
currentClusterRegionActivity = getActivity(currentClusterLineElements[3])
currentClusterRegion =\
(currentClusterLineElements[0], int(currentClusterLineElements[1]), int(currentClusterLineElements[2]), currentClusterRegionActivity)
else:
currentClusterRegion = ("", -1, -1, 0)
return currentClusterRegion
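# Example of the tuples returned above (illustrative): a line such as
# "chr1\t100\t600\tE5" yields ("chr1", 100, 600, 1), while the end of the file
# yields the sentinel ("", -1, -1, 0) that callers test against.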
def associateRegionsToAcitveInactive(regionFileName, clusterFileNameListFileName, outputFileName):
# Find the cluster for each regionCenter
# Assumes that each base has been assigned to exactly 1 cluster
# ASSUMES THAT CLUSTER FILES HAVE BEEN SORTED BY CHROM, THEN START, THEN END
regionList = bt.BedTool(regionFileName).sort()
clusterFileNameListFile = open(clusterFileNameListFileName)
clusterFileList = []
currentClusterRegionList = []
for line in clusterFileNameListFile:
# Iterate through the cluster file names and open each file
clusterFile = open(line.strip())
clusterFileList.append(clusterFile)
currentClusterLine = clusterFile.readline()
currentClusterLineElements = currentClusterLine.strip().split("\t")
currentClusterRegionActivity = getActivity(currentClusterLineElements[3])
currentClusterRegion =\
(currentClusterLineElements[0], int(currentClusterLineElements[1]), int(currentClusterLineElements[2]), currentClusterRegionActivity)
currentClusterRegionList.append(currentClusterRegion)
if os.path.isfile(outputFileName):
# Do not over-write the existing file
print("ActiveInactive matrix already exists!")
return regionList, len(clusterFileList)
outputFile = open(outputFileName, 'w+')
print("Getting cell type activity for each region!")
for region in regionList:
# Iterate through regions and find the cluster for each region
outputFile.write("\t".join([show_value(region[0]), show_value(region[1]), show_value(region[2])]))
chrom = show_value(region[0])
regionCenter = int(show_value(region[1])) + int(round((float(show_value(region[2])) - float(show_value(region[1])))/2.0))
for i in range(len(clusterFileList)):
# Iterate through cluster files and find the activity of the cluster overlapping the current region center
if currentClusterRegionList[i][0] == "":
# All parts of the current cluster file have been seen, so record 0 and continue
outputFile.write("\t" + "0")
continue
allClusterFileSeen = False
while currentClusterRegionList[i][0] < chrom:
# Iterate through the cluster file's regions until one on the same chromosome has been found
if currentClusterRegionList[i][0] == "":
# All parts of the current cluster file have been seen, so record 0 and stop
outputFile.write("\t" + "0")
allClusterFileSeen = True
break
currentClusterRegionList[i] = getNextRegionAndActivity(clusterFileList[i])
if allClusterFileSeen == True:
# All of the current cluster has been seen, so continue
continue
while (currentClusterRegionList[i][2] <= regionCenter) and (currentClusterRegionList[i][0] == chrom):
# Iterate through the cluster's regions until one not before regionCenter has been found
currentClusterRegionList[i] = getNextRegionAndActivity(clusterFileList[i])
if currentClusterRegionList[i][0] == "":
# All parts of the current cluster have been seen, so record 0 and stop
outputFile.write("\t" + "0")
allClusterFileSeen = True
break
if allClusterFileSeen == True:
# All of the current cluster has been seen, so continue
continue
if (regionCenter >= currentClusterRegionList[i][1]) and (regionCenter < currentClusterRegionList[i][2]):
# The region belongs to the region within the cluster file
outputFile.write("\t" + str(currentClusterRegionList[i][3]))
continue
outputFile.write("\t" + "0")
outputFile.write("\n")
print("Got activity for " + str(regionList.count()) + " regions.")
for clusterFile in clusterFileList:
# Iterate through the files with the clusters and close each cluster
clusterFile.close()
outputFile.close()
return regionList, len(clusterFileList)
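# Output format note (added): each line of outputFileName holds
# chrom<TAB>start<TAB>end followed by one 0/1 activity flag per cluster file, in
# the order the files are listed in clusterFileNameListFileName.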
def clusterActiveInactive(outputFileName, heatmapFileNamePrefix, numCellTypes, regionList, maxClust, liftoverChainsFileName, cellTypesFileName, genomeFileName, \
DNaseUnionFileName, chromSizesFileName):
# Cluster the binary indicators of activity
print("Clustering!")
activeInactiveMat = np.loadtxt(outputFileName, dtype='int16', usecols=range(3,numCellTypes+3))
cellTypesFile = open(cellTypesFileName)
cellTypes = [line.strip() for line in cellTypesFile]
cellTypesFile.close()
DNaseUnion = bt.BedTool(DNaseUnionFileName)
regionListDNaseFastaFileName = outputFileName[0:-4] + "_overlappingDNase.fa"
DNaseUnion.intersect(regionList, wa=True, u = True).slop(g = chromSizesFileName, b = 200).sort().merge().sequence(fi = genomeFileName, fo = regionListDNaseFastaFileName)
# Clustering
rowkclust = KMeans(n_clusters=25, init='k-means++', n_init=20, max_iter=50)
rowkclust.fit(activeInactiveMat)
# Hierarchically cluster clusters
clusterCentroidLinkage = linkage(rowkclust.cluster_centers_, method='ward', metric='euclidean')
groups = cut_tree(clusterCentroidLinkage, height=0.1*max(clusterCentroidLinkage[:,2]))
# Reassign clusters
newClusters = np.array([groups[l] for l in rowkclust.labels_])
print(max(newClusters) + 1)
optNumRowClust = max(newClusters) + 1
if optNumRowClust > maxClust:
        # The optimal number of clusters is too large, so cap it at maxClust
optNumRowClust = maxClust
random.seed(5)
# Order Columns
colClustDistMat = pdist(activeInactiveMat.transpose(), metric='euclidean')
colClust = linkage(activeInactiveMat.transpose(), method='average', metric='euclidean')
optimalorder = optimal_leaf_ordering(colClust, colClustDistMat)
colSortInd = leaves_list(optimalorder)
for numRowClust in range(5, maxClust + 1):
# Iterate through the potential numbers of clusters and try each
print("numRowClust is " + str(numRowClust))
rowkclust = KMeans(n_clusters=numRowClust, init='k-means++', n_init=50, max_iter=10)
rowkclust.fit(activeInactiveMat)
# Sort by cluster mean
rowclustermeans = np.mean(rowkclust.cluster_centers_, axis = 1)
plotClusters = rowkclust.labels_
rowkclustCentroidsArgSortReverse = (0-rowclustermeans).argsort()
rowkclustCentroidsRanksReverse = np.empty(rowkclust.cluster_centers_.shape[0], int)
rowkclustCentroidsRanksReverse[rowkclustCentroidsArgSortReverse] = np.arange(rowkclustCentroidsArgSortReverse.shape[0])
for clust in range(numRowClust):
# Iterate through the clusters and replace the labels by the reverse rank of the cluster centroids
plotClusters[np.where(rowkclust.labels_ == clust)] = rowkclustCentroidsRanksReverse[clust]
# Order by plot cluster
rowSortInd = np.argsort(plotClusters)
# Plot
# Order columns and rows
rowSortActiveInactiveMat = activeInactiveMat[rowSortInd,:]
colSortActiveInactiveMat = rowSortActiveInactiveMat[:, colSortInd]
# Region cluster colors
cols = RainbowPalette(n=numRowClust)
# Name the heatmap file
heatmapFileName = heatmapFileNamePrefix + "_" + str(numRowClust)
if numRowClust == optNumRowClust:
# The number of clusters is the optimal number, so add optimal to the file name
heatmapFileName = heatmapFileName + "_optimal"
colList = []
labList = []
for ind in range(numRowClust):
# Iterate through the clusters and make the colors and labels for each
if ind >= np.unique(plotClusters).shape[0]:
# The current index is larger than the largest index, so skip it
continue
colList.extend([cols[ind] for i in range(len(np.argwhere(plotClusters == np.unique(plotClusters)[ind])))])
labList.extend([ind for i in range(len(np.argwhere(plotClusters == np.unique(plotClusters)[ind])))])
if numRowClust == optNumRowClust:
# At the optimal number of clusters, so make the bed files and run Homer
regionListCurrentClust = regionList.at(np.argwhere(plotClusters == np.unique(plotClusters)[ind]))
regionListCurrentClustFileName = heatmapFileName + "_" + str(ind) + ".bed"
regionListCurrentClust.saveas(regionListCurrentClustFileName)
regionListCurrentClusthg19FileName = heatmapFileName + "_" + str(ind) + "_hg19.bed"
regionListCurrentClust.liftover(liftoverChainsFileName).cut([0,1,2]).saveas(regionListCurrentClusthg19FileName)
regionListCurrentClustDNaseUnionFastaFileName = heatmapFileName + "_" + str(ind) + "_DNaseUnion.fa"
DNaseUnion.intersect(regionListCurrentClust, wa=True, u=True).slop(g = chromSizesFileName, b = 200).sort().merge().sequence(fi = genomeFileName, \
fo = regionListCurrentClustDNaseUnionFastaFileName)
regionListCurrentClustHomerResultsDir = heatmapFileName + "_" + str(ind) + ".HomerResults"
os.system(" ".join(["findMotifs.pl", regionListCurrentClustDNaseUnionFastaFileName, "fasta", \
regionListCurrentClustHomerResultsDir, "-fasta", regionListDNaseFastaFileName]))
os.remove(regionListCurrentClustDNaseUnionFastaFileName)
colVec = np.array(colList)
labVec = np.array(labList)
# Plot heatmap
heatmapFileName = heatmapFileName + ".png"
fig, ax = plt.subplots(figsize=(24, 14))
#plt.axis((4.1, 6.1, 7.1, 4.1))
#plt.subplots_adjust(left=2, bottom=2, right=6, top=2.1)
cellTypesReordered = [cellTypes[i] for i in colSortInd]
ax = heatmap(colSortActiveInactiveMat, cmap='RdBu', xticklabels=cellTypesReordered, yticklabels=labVec)
fig.savefig(heatmapFileName, format="png")
os.remove(regionListDNaseFastaFileName)
if __name__=="__main__":
regionFileName = sys.argv[1]
clusterFileNameListFileName = sys.argv[2]
outputFileName = sys.argv[3]
clusterData = int(sys.argv[4])
regionList, numCellTypes = associateRegionsToAcitveInactive(regionFileName, clusterFileNameListFileName, outputFileName)
if clusterData == 1:
# Cluster the data
heatmapFileNamePrefix = sys.argv[5]
liftoverChainsFileName = sys.argv[6]
cellTypesFileName = sys.argv[7]
genomeFileName = sys.argv[8]
DNaseUnionFileName = sys.argv[9]
chromSizesFileName = sys.argv[10]
maxClust = 15
if len(sys.argv) > 11:
# The maximum number of clusters has been specified
maxClust = int(sys.argv[11])
clusterActiveInactive(outputFileName, heatmapFileNamePrefix, numCellTypes, regionList, maxClust, liftoverChainsFileName, cellTypesFileName, \
genomeFileName, DNaseUnionFileName, chromSizesFileName)
| mit |
Parallel-in-Time/pySDC | pySDC/playgrounds/deprecated/acoustic_1d_imex/ploterrorconstants.py | 1 | 3327 | from subprocess import call
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
from pylab import rcParams
fs = 8
order = np.array([])
nsteps = np.array([])
error = np.array([])
# load SDC data
file = open('conv-data.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order = np.append(order, int(items[0]))
nsteps = np.append(nsteps, int(float(items[1])))
error = np.append(error, float(items[2]))
file.close()
assert np.size(order)==np.size(nsteps), 'Found different number of entries in order and nsteps'
assert np.size(nsteps)==np.size(error), 'Found different number of entries in nsteps and error'
N = np.size(nsteps)//3
assert 3*N == np.size(nsteps), 'Number of entries not a multiple of three'
# load Runge-Kutta data
order_rk = np.array([])
nsteps_rk = np.array([])
error_rk = np.array([])
file = open('conv-data-rk.txt', 'r')
while True:
line = file.readline()
if not line: break
items = str.split(line, " ", 3)
order_rk = np.append(order_rk, int(items[0]))
nsteps_rk = np.append(nsteps_rk, int(float(items[1])))
error_rk = np.append(error_rk, float(items[2]))
file.close()
assert np.size(order_rk)==np.size(nsteps_rk), 'Found different number of entries in order and nsteps'
assert np.size(nsteps_rk)==np.size(error_rk), 'Found different number of entries in nsteps and error'
N = np.size(nsteps_rk)//3
assert 3*N == np.size(nsteps_rk), 'Number of entries not a multiple of three'
### Compute and plot error constant ###
errconst_sdc = np.zeros((3,N))
errconst_rk = np.zeros((3,N))
nsteps_plot_sdc = np.zeros((3,N))
nsteps_plot_rk = np.zeros((3,N))
order_plot = np.zeros(3)
for ii in range(0,3):
order_plot[ii] = order[N*ii]
for jj in range(0,N):
p_sdc = order[N*ii+jj]
err_sdc = error[N*ii+jj]
nsteps_plot_sdc[ii,jj] = nsteps[N*ii+jj]
dt_sdc = 1.0/float(nsteps_plot_sdc[ii,jj])
errconst_sdc[ii,jj] = err_sdc/dt_sdc**float(p_sdc)
p_rk = order_rk[N*ii+jj]
err_rk = error_rk[N*ii+jj]
nsteps_plot_rk[ii,jj] = nsteps_rk[N*ii+jj]
dt_rk = 1.0/float(nsteps_plot_rk[ii,jj])
errconst_rk[ii,jj] = err_rk/dt_rk**float(p_rk)
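# Note on the computation above (added for clarity): for a scheme of order p the
# error is modelled as err ~ C*dt**p with dt = 1/nsteps, so the estimated
# constant plotted below is C = err/dt**p for every (order, nsteps) pair read
# from the convergence files.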
color = [ 'r', 'b', 'g' ]
shape_sdc = ['<', '^', '>']
shape_rk = ['o', 'd', 's']
rcParams['figure.figsize'] = 2.5, 2.5
fig = plt.figure()
for ii in range(0,3):
plt.semilogy(nsteps_plot_sdc[ii,:], errconst_sdc[ii,:], shape_sdc[ii], markersize=fs, color=color[ii], label='SDC('+str(int(order_plot[ii]))+')')
plt.semilogy(nsteps_plot_rk[ii,:], errconst_rk[ii,:], shape_rk[ii], markersize=fs-2, color=color[ii], label='IMEX('+str(int(order_plot[ii]))+')')
plt.legend(loc='lower left', fontsize=fs, prop={'size':fs-1}, ncol=2)
plt.xlabel('Number of time steps', fontsize=fs)
plt.ylabel('Estimated error constant', fontsize=fs, labelpad=2)
plt.xlim([0.9*np.min(nsteps_plot_sdc), 1.1*np.max(nsteps_plot_sdc)])
plt.ylim([1e1, 1e6])
plt.yticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6],fontsize=fs)
plt.xticks([20, 30, 40, 60, 80, 100], fontsize=fs)
plt.gca().get_xaxis().get_major_formatter().labelOnlyBase = False
plt.gca().get_xaxis().set_major_formatter(ScalarFormatter())
#plt.show()
filename = 'error_constants.pdf'
fig.savefig(filename,bbox_inches='tight')
call(["pdfcrop", filename, filename])
| bsd-2-clause |
penguinmenac3/alice-ai | utilitylearning/deepqlearning.py | 1 | 5207 | import time
import random
from collections import deque
import numpy as np
import os
import cv2
class Trainer(object):
def __init__(self, bot, agent, model, memory_name="default"):
self.bot = bot
self.agent = agent
self.model = model
self.dt = 0.1
self.initial_observe = 1000
self.replay_mem_size = 60000
self.replay_memory = deque()
self.replay_memory_bad = deque()
self.batch_size = 64
self.epsilon = 0.05
self.momentum = 0.0
self.l2_decay = 0.01
self.gamma = 0.7
self.memory_name = memory_name
if os.path.isfile(memory_name + ".npz"):
data = dict(np.load(memory_name + ".npz"))["arr_0"][()]
self.replay_memory = deque(data["replay_memory"])
self.replay_memory_bad = deque(data["replay_memory_bad"])
print()
def save(self):
data = {}
data["replay_memory"] = list(self.replay_memory)
data["replay_memory_bad"] = list(self.replay_memory_bad)
np.savez(self.memory_name, data)
print("Saved trainer memory.")
def _add_to_replay_memory(self, state, action, reward):
arr = list(self.agent.history)[1:]
arr.append(state)
terminal = reward < 0
hist1 = [item for sublist in list(self.agent.history) for item in sublist]
hist2 = [item for sublist in arr for item in sublist]
if reward >= 0:
self.replay_memory.append((hist1, action, reward, hist2, terminal))
else:
self.replay_memory_bad.append((hist1, action, reward, hist2, terminal))
if len(self.replay_memory) > self.replay_mem_size:
self.replay_memory.popleft()
if len(self.replay_memory_bad) > self.replay_mem_size:
self.replay_memory_bad.popleft()
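    # Note (added): each replay entry is a (state_history, one_hot_action, reward,
    # next_state_history, terminal) tuple with both histories flattened; a negative
    # reward marks the transition as terminal and sends it to the separate "bad"
    # deque, so _train_step can sample from good and bad experience alike.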
def _train_step(self):
minibatch = random.sample(list(self.replay_memory_bad) + list(self.replay_memory), self.batch_size)
inputs = np.zeros((self.batch_size, self.model.history_size * self.model.state_size))
targets = np.zeros((inputs.shape[0], self.model.action_size))
for i in range(0, len(minibatch)):
s_t = minibatch[i][0]
a_t = minibatch[i][1]
r = minibatch[i][2]
s_t1 = minibatch[i][3]
terminal = minibatch[i][4]
inputs[i:i + 1] = s_t
targets[i] = self.model.model.predict(np.array([s_t]))
Q_sa = self.model.model.predict(np.array([s_t1]))
idx = np.argmax(a_t)
if terminal:
targets[i, idx] = r
else:
targets[i, idx] = r + self.gamma * np.max(Q_sa)
self.model.model.train_on_batch(inputs, targets)
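    # Note (added): the loop above implements the usual Q-learning target: for the
    # taken action the target is r on terminal transitions and
    # r + gamma * max_a Q(s_{t+1}, a) otherwise, while all other action entries keep
    # the network's current prediction so only the taken action is updated.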
def train(self, plot_error=False):
stats = []
if plot_error:
import matplotlib.pyplot as plt
plt.ion()
try:
t = max(0, len(self.replay_memory))
last_error = 0
state = self.bot.get_state()
img = self.bot.get_video()
print("Start training, stop with CTRL + C")
while True:
timing_start = time.time()
# Let the agent pick an action.
action = None
if t <= self.initial_observe:
action = self.agent.pick_action_one_hot(state, 1.0)
else:
action = self.agent.pick_action_one_hot(state, self.epsilon)
# Execute the action and observe
act, r = self.bot.act(self.bot.actions[np.argmax(action)], self.dt)
state = self.bot.get_state()
img = self.bot.get_video()
# Create two entries for the replay memory
self._add_to_replay_memory(state, action, r)
# Do a training step
if t > self.initial_observe:
self._train_step()
# Try to reset if we are in a simulation environment
if r < 0:
error_free = t - last_error
stats.append(error_free)
if plot_error:
plt.clf()
plt.gcf().canvas.set_window_title("collision free statistic")
plt.ylabel("collision free steps")
plt.xlabel("collision Id")
plt.plot(stats)
plt.pause(0.001)
print("Reset: " + str(t) + ", error free: " + str(t - last_error))
self.agent.clear_history()
last_error = t
# Save the model
if t % 1000 == 0 and t > self.initial_observe:
print("save model")
self.model.save()
elapsed_time = time.time() - timing_start
sleep_time = max(0, self.dt - elapsed_time)
time.sleep(sleep_time)
if plot_error:
plt.pause(0.001)
t += 1
except KeyboardInterrupt:
print("User Interrupt: end training.")
return
| lgpl-3.0 |
JAmarel/FilmThickness | customplotting.py | 1 | 7326 | import numpy as np
import matplotlib.pyplot as plt
def HSM_Plot(RedIntensity,GreenIntensity,BlueIntensity,RedSTD,GreenSTD,BlueSTD,ExposureTimes):
"""Plots intensity values as recorded from imagej histograms vs ExposureTime.
Parameters
----------
RedIntensity: (N+1,) array.
Numpy array containing N+1 subarrays. Where each sub array corresponds to
a different lasso'd region. Each entry in each sub array is an intensity
value that corresponds to a certain exposure time. N is the number of
lasso'd regions. So it has shape (N+1,) to account for extra HSM data
RedSTD: (N+1,) array
Same as RedIntensity, except now each entry in a sub array is the standard
deviation of each intensity. Calculated from imagej histogram data.
ExposureTimes: (N+1,) array
Same as above. Each entry is now an exposure time.
"""
f, (ax1,ax2,ax3) = plt.subplots(3, 1,figsize=(8,12))
#Removing Top/Right Spine/Ticks
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.yaxis.set_ticks_position('left')
ax3.xaxis.set_ticks_position('bottom')
plt.sca(ax1)
for i in np.arange(0,len(RedIntensity)):
plt.errorbar(ExposureTimes[i],RedIntensity[i], yerr=RedSTD[i],ls='None',marker='None')
plt.title("Red Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
plt.sca(ax2)
for i in np.arange(0,len(GreenIntensity)):
plt.errorbar(ExposureTimes[i],GreenIntensity[i], yerr=GreenSTD[i],ls='None',marker='None')
plt.title("Green Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
plt.sca(ax3)
for i in np.arange(0,len(BlueIntensity)):
plt.errorbar(ExposureTimes[i],BlueIntensity[i], yerr=BlueSTD[i],ls='None',marker='None')
plt.title("Blue Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
plt.tight_layout()
def Line_Plot(RedIntensity,GreenIntensity,BlueIntensity,RedSTD,GreenSTD,BlueSTD,RedSlopes,GreenSlopes,BlueSlopes,ExposureTimes):
"""Plots intensity values as recorded from imagej histograms vs ExposureTime.
Parameters
----------
RedIntensity: (N+1,) array.
Numpy array containing N+1 subarrays. Where each sub array corresponds to
a different lasso'd region. Each entry in each sub array is an intensity
value that corresponds to a certain exposure time. N is the number of
lasso'd regions. So it has shape (N+1,) to account for extra HSM data
RedSTD: (N+1,) array
Same as RedIntensity, except now each entry in a sub array is the standard
deviation of each intensity. Calculated from imagej histogram data.
ExposureTimes: (N+1,) array
Same as above. Each entry is now an exposure time.
"""
f, (ax1,ax2,ax3) = plt.subplots(3, 1,figsize=(8,12))
#Removing Top/Right Spine/Ticks
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.yaxis.set_ticks_position('left')
ax3.xaxis.set_ticks_position('bottom')
plt.sca(ax1)
for i in np.arange(0,len(RedIntensity)):
plt.errorbar(ExposureTimes[i],RedIntensity[i], yerr=RedSTD[i],ls='None',marker='None')
plt.title("Red Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
t = np.linspace(0,np.amax(ExposureTimes[i]),100)
plt.plot(t,RedSlopes[i]*t)
plt.sca(ax2)
for i in np.arange(0,len(GreenIntensity)):
plt.errorbar(ExposureTimes[i],GreenIntensity[i], yerr=GreenSTD[i],ls='None',marker='None')
plt.title("Green Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
t = np.linspace(0,np.amax(ExposureTimes[i]),100)
plt.plot(t,GreenSlopes[i]*t)
plt.sca(ax3)
for i in np.arange(0,len(BlueIntensity)):
plt.errorbar(ExposureTimes[i],BlueIntensity[i], yerr=BlueSTD[i],ls='None',marker='None')
plt.title("Blue Channel")
plt.xlabel("Exposure Time (s)")
plt.ylabel("Intensity [0-255]")
plt.xlim(0,np.amax(ExposureTimes[i])*1.1)
plt.ylim(0,255)
t = np.linspace(0,np.amax(ExposureTimes[i]),100)
plt.plot(t,BlueSlopes[i]*t)
plt.tight_layout()
def Ref(N,color):
RedWaveLength = 620e-9
GreenWaveLength = 545e-9
BlueWaveLength = 463e-9
if color == 'red':
y = RedWaveLength
elif color =='green':
y = GreenWaveLength
elif color == 'blue':
y = BlueWaveLength
n = 1.516 #Index of Refraction
a = 3.1e-9 #nm 8CB molecule length
r = -.205 #reflectance at normal incidence
B = 2*np.pi*N*n*a/y
numerator = 2*(r**2)*(1-np.cos(2*B))
denominator = 1 - 2*(r**2)*np.cos(2*B) + r**4
return numerator/denominator
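# Formula note (added): Ref evaluates the two-beam thin-film reflectivity
#   B = 2*pi*N*n*a/lambda,   R = 2*r**2*(1 - cos(2*B)) / (1 - 2*r**2*cos(2*B) + r**4)
# with n = 1.516, molecular length a = 3.1 nm and normal-incidence amplitude
# reflectance r = -0.205, so R depends only on the layer number N and the channel
# wavelength. Illustrative use (mirrors Ref_Plot below):
#   N_vals = np.linspace(2, 30, 100); plt.plot(N_vals**2, Ref(N_vals, 'green'))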
def Ref_Plot(GreenRef,BlueRef,N_Guesses):
#Camera response (for curve)
RedWaveLength = 620e-9
GreenWaveLength = 545e-9
BlueWaveLength = 463e-9
N_Array = np.linspace(2,30,100) #This is used to plot the reflectivity curve fit
f, (ax1,ax2) = plt.subplots(2, 1,figsize=(8,12))
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.yaxis.set_ticks_position('left')
ax2.xaxis.set_ticks_position('bottom')
plt.sca(ax1)
plt.plot(N_Array**2,Ref(N_Array,'green'),color='green');
plt.scatter(N_Guesses**2,GreenRef,color = 'green');
plt.xlim(0,(np.amax(N_Array)**2)*1.1);
plt.ylim(0,np.amax(GreenRef)*2);
plt.title("Green Channel Reflectivity")
plt.xlabel("N$^2$")
plt.ylabel("Reflectivity")
plt.sca(ax2)
plt.plot(N_Array**2,Ref(N_Array,'blue'),color='blue');
plt.scatter(N_Guesses**2,BlueRef,color = 'blue');
plt.xlim(0,(np.amax(N_Array)**2)*1.1);
plt.ylim(0,np.amax(GreenRef)*2);
plt.title("Blue Channel Reflectivity")
plt.xlabel("N$^2$")
plt.ylabel("Reflectivity")
plt.tight_layout() | mit |
modelon/PyFMI | src/common/plotting/plot_gui.py | 1 | 73748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Modelon AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib
matplotlib.interactive(True)
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib import rcParams
import fnmatch
import re
#GUI modules
try:
import wx
import wx.lib.agw.customtreectrl as wxCustom
import wx.lib.agw.aui as aui
except ImportError:
print("WX-Python not found. The GUI will not work.")
#JModelica related imports
try:
from pyfmi.common.io import ResultDymolaTextual
from pyfmi.common.io import ResultDymolaBinary
from pyfmi.common.io import ResultCSVTextual
from pyfmi.common.io import JIOError
except ImportError:
try:
from pyjmi.common.io import ResultDymolaTextual
from pyjmi.common.io import ResultDymolaBinary
from pyjmi.common.io import ResultCSVTextual
from pyjmi.common.io import JIOError
except ImportError:
print("JModelica Python package was not found.")
#Import general modules
import os as O
ID_GRID = 15001
ID_LICENSE = 15002
ID_LABELS = 15003
ID_AXIS = 15004
ID_MOVE = 15005
ID_ZOOM = 15006
ID_RESIZE = 15007
ID_LINES = 15008
ID_CLEAR = 15009
def convert_filter(expression):
"""
Convert a filter based on unix filename pattern matching to a
list of regular expressions.
"""
regexp = []
if isinstance(expression,str):
regex = fnmatch.translate(expression)
regexp = [re.compile(regex)]
elif isinstance(expression,list):
for i in expression:
regex = fnmatch.translate(i)
regexp.append(re.compile(regex))
else:
raise Exception("Unknown input.")
return regexp
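# Illustrative example (not part of the original module): convert_filter turns
# unix-style patterns into compiled regular expressions, e.g.
#   convert_filter("*.x") or convert_filter(["*.x", "der(*)"])
# and match() below then tests a variable name against that list, so
#   match("body.x", convert_filter("*.x")) evaluates to True.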
def match(name, filter_list):
found = False
for j in range(len(filter_list)):
if re.match(filter_list[j], name):
found = True
break
return found
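# Hedged usage note (added): in this GUI the filter strings from the filter panel
# are presumably passed through convert_filter before VariableTree.HideNodes tests
# each variable with match(data["full_name"], filter); see HideNodes further below.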
class MainGUI(wx.Frame):
sizeHeightDefault=900
sizeLengthDefault=675
sizeHeightMin=100
sizeLengthMin=130
sizeTreeMin=200
sizeTreeDefault=sizeTreeMin+40
def __init__(self, parent, ID, filename=None):
self.title = "JModelica.org Plot GUI"
wx.Frame.__init__(self, parent, ID, self.title,
wx.DefaultPosition, wx.Size(self.sizeHeightDefault, self.sizeLengthDefault))
#Handle idle events
#wx.IdleEvent.SetMode(wx.IDLE_PROCESS_SPECIFIED)
#Variables for the results
self.ResultFiles = [] #Contains all the result files
self.PlotVariables = [[]] #Contains all the variables for the different plots
self.ResultIndex = 0 #Index of the result file
self.PlotIndex = 0 #Index of the plot variables connected to the different plots
#Settings variables
self.grid = True
self.zoom = True
self.move = False
#Create menus and status bars
self.CreateStatusBar() #Create a statusbar at the bottom
self.CreateMenu() #Create the normal menu
#Create the main window
self.verticalSplitter = wx.SplitterWindow(self, -1, style = wx.CLIP_CHILDREN | wx.SP_LIVE_UPDATE | wx.SP_3D)
#Create the positioners
self.leftPanel = wx.Panel(self.verticalSplitter)
self.leftSizer = wx.BoxSizer(wx.VERTICAL)
self.rightPanel = wx.Panel(self.verticalSplitter)
self.rightSizer = wx.BoxSizer(wx.VERTICAL)
#Create the panels (Tree and Plot)
if wx.VERSION < (2,8,11,0):
self.noteBook = aui.AuiNotebook(self.rightPanel, style= aui.AUI_NB_TOP | aui.AUI_NB_TAB_SPLIT | aui.AUI_NB_TAB_MOVE | aui.AUI_NB_SCROLL_BUTTONS | aui.AUI_NB_CLOSE_ON_ACTIVE_TAB | aui.AUI_NB_DRAW_DND_TAB)
self.tree = VariableTree(self.noteBook, self.leftPanel,style = wx.SUNKEN_BORDER | wxCustom.TR_HAS_BUTTONS | wxCustom.TR_HAS_VARIABLE_ROW_HEIGHT | wxCustom.TR_HIDE_ROOT | wxCustom.TR_ALIGN_WINDOWS)
else:
self.noteBook = aui.AuiNotebook(self.rightPanel, agwStyle= aui.AUI_NB_TOP | aui.AUI_NB_TAB_SPLIT | aui.AUI_NB_TAB_MOVE | aui.AUI_NB_SCROLL_BUTTONS | aui.AUI_NB_CLOSE_ON_ACTIVE_TAB | aui.AUI_NB_DRAW_DND_TAB)
self.tree = VariableTree(self.noteBook, self.leftPanel,style = wx.SUNKEN_BORDER, agwStyle = wxCustom.TR_HAS_BUTTONS | wxCustom.TR_HAS_VARIABLE_ROW_HEIGHT | wxCustom.TR_HIDE_ROOT | wxCustom.TR_ALIGN_WINDOWS)
self.plotPanels = [PlotPanel(self.noteBook,self.grid, move=self.move, zoom=self.zoom)]
self.noteBook.AddPage(self.plotPanels[0],"Plot 1")
self.filterPanel = FilterPanel(self, self.leftPanel, self.tree)
#Add the panels to the positioners
self.leftSizer.Add(self.tree,1,wx.EXPAND)
self.leftSizer.Add(self.filterPanel,0,wx.EXPAND)
self.rightSizer.Add(self.noteBook,1,wx.EXPAND)
self.verticalSplitter.SplitVertically(self.leftPanel, self.rightPanel,self.sizeTreeDefault)
#self.verticalSplitter.SetMinimumPaneSize(self.sizeTreeMin)
self.verticalSplitter.SetMinimumPaneSize(self.filterPanel.GetBestSize()[0])
#Position the main windows
self.leftPanel.SetSizer(self.leftSizer)
self.rightPanel.SetSizer(self.rightSizer)
self.mainSizer = wx.BoxSizer() #Create the main positioner
self.mainSizer.Add(self.verticalSplitter, 1, wx.EXPAND) #Add the vertical splitter
self.SetSizer(self.mainSizer) #Set the positioner to the main window
self.SetMinSize((self.sizeHeightMin,self.sizeLengthMin)) #Set minimum sizes
#Bind the exit event from the "cross"
self.Bind(wx.EVT_CLOSE, self.OnMenuExit)
#Bind the tree item checked event
self.tree.Bind(wxCustom.EVT_TREE_ITEM_CHECKED, self.OnTreeItemChecked)
#Bind the key press event
self.tree.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
#Bind the closing of a tab
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnCloseTab, self.noteBook)
#Bind the changing of a tab
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGING, self.OnTabChanging, self.noteBook)
#Bind the changed of a tab
self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnTabChanged, self.noteBook)
if not filename == None:
self._OpenFile(filename)
self.Centre(True) #Position the GUI in the centre of the screen
self.Show(True) #Show the Plot GUI
def CreateMenu(self):
#Creating the menu
filemenu = wx.Menu()
helpmenu = wx.Menu()
editmenu = wx.Menu()
viewmenu = wx.Menu()
menuBar = wx.MenuBar()
#Create the menu options
# Main
self.menuOpen = filemenu.Append(wx.ID_OPEN, "&Open\tCtrl+O","Open a result.")
self.menuSaveFig = filemenu.Append(wx.ID_SAVE, "&Save\tCtrl+S", "Save the current figure.")
filemenu.AppendSeparator() #Append a seperator between Open and Exit
self.menuExit = filemenu.Append(wx.ID_EXIT,"E&xit\tCtrl+X"," Terminate the program.")
# Edit
self.editAdd = editmenu.Append(wx.ID_ADD,"A&dd Plot","Add a plot window.")
self.editClear = editmenu.Append(wx.ID_CLEAR, "Clear Plot", "Clear the current plot window.")
editmenu.AppendSeparator()
self.editAxisLabels = editmenu.Append(ID_AXIS,"Axis / Labels", "Edit the axis and labels of the current plot.")
self.editLinesLegends = editmenu.Append(ID_LINES, "Lines / Legends", "Edit the lines and the legend of the current plot.")
# View
self.viewGrid = viewmenu.Append(ID_GRID,"&Grid","Show/Hide Grid.",kind=wx.ITEM_CHECK)
viewmenu.AppendSeparator() #Append a seperator
self.viewMove = viewmenu.Append(ID_MOVE,"Move","Use the mouse to move the plot.",kind=wx.ITEM_RADIO)
self.viewZoom = viewmenu.Append(ID_ZOOM,"Zoom","Use the mouse for zooming.",kind=wx.ITEM_RADIO)
viewmenu.AppendSeparator()
self.viewResize = viewmenu.Append(ID_RESIZE, "Resize", "Resize the current plot.")
#Check items
viewmenu.Check(ID_GRID, self.grid)
viewmenu.Check(ID_ZOOM, self.zoom)
viewmenu.Check(ID_MOVE, self.move)
# Help
self.helpLicense = helpmenu.Append(ID_LICENSE, "License","Show the license.")
self.helpAbout = helpmenu.Append(wx.ID_ABOUT, "&About"," Information about this program.")
#Setting up the menu
menuBar.Append(filemenu,"&File") #Adding the "filemenu" to the MenuBar
menuBar.Append(editmenu,"&Edit") #Adding the "editmenu" to the MenuBar
menuBar.Append(viewmenu,"&View") #Adding the "viewmenu" to the MenuBar
menuBar.Append(helpmenu,"&Help") #Adding the "helpmenu" to the MenuBar
#Binding the events
self.Bind(wx.EVT_MENU, self.OnMenuOpen, self.menuOpen)
self.Bind(wx.EVT_MENU, self.OnMenuSaveFig, self.menuSaveFig)
self.Bind(wx.EVT_MENU, self.OnMenuExit, self.menuExit)
self.Bind(wx.EVT_MENU, self.OnMenuAdd, self.editAdd)
self.Bind(wx.EVT_MENU, self.OnMenuClear, self.editClear)
self.Bind(wx.EVT_MENU, self.OnMenuAxisLabels, self.editAxisLabels)
self.Bind(wx.EVT_MENU, self.OnMenuLinesLegends, self.editLinesLegends)
self.Bind(wx.EVT_MENU, self.OnMenuResize, self.viewResize)
self.Bind(wx.EVT_MENU, self.OnMenuGrid, self.viewGrid)
self.Bind(wx.EVT_MENU, self.OnMenuMove, self.viewMove)
self.Bind(wx.EVT_MENU, self.OnMenuZoom, self.viewZoom)
self.Bind(wx.EVT_MENU, self.OnMenuLicense, self.helpLicense)
self.Bind(wx.EVT_MENU, self.OnMenuAbout, self.helpAbout)
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
#Set keyboard shortcuts
hotKeysTable = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord("O"), self.menuOpen.GetId()),
(wx.ACCEL_CTRL, ord("S"), self.menuSaveFig.GetId()),
(wx.ACCEL_CTRL, ord("X"), self.menuExit.GetId())])
self.SetAcceleratorTable(hotKeysTable)
#Disable Lines and Legends
self.editLinesLegends.Enable(False)
def OnMenuMove(self, event):
self.move = True
self.zoom = False
for i in range(self.noteBook.GetPageCount()):
self.noteBook.GetPage(i).UpdateSettings(move = self.move,
zoom = self.zoom)
def OnMenuZoom(self, event):
self.move = False
self.zoom = True
for i in range(self.noteBook.GetPageCount()):
self.noteBook.GetPage(i).UpdateSettings(move = self.move,
zoom = self.zoom)
def OnMenuExit(self, event):
self.Destroy() #Close the GUI
def OnMenuAbout(self, event):
dlg = wx.MessageDialog(self, 'JModelica.org Plot GUI.\n', 'About',
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnMenuResize(self, event):
IDPlot = self.noteBook.GetSelection()
self.noteBook.GetPage(IDPlot).ReSize()
def OnMenuOpen(self, event):
#Open the file window
dlg = wx.FileDialog(self, "Open result file(s)",
wildcard="Supported files (.txt, .mat, .csv)|*.txt;*.mat;*.csv|Text files (.txt)|*.txt|MATLAB files (.mat)|*.mat|Comma-Separated Values files (.csv)|*.csv|All files (*.*)|*.*",
style=wx.FD_MULTIPLE)
#If OK load the results
if dlg.ShowModal() == wx.ID_OK:
for n in dlg.GetFilenames():
self._OpenFile(O.path.join(dlg.GetDirectory(),n))
dlg.Destroy() #Destroy the popup window
def OnMenuSaveFig(self, event):
#Open the file window
dlg = wx.FileDialog(self, "Choose a filename to save to",wildcard="Portable Network Graphics (*.png)|*.png|" \
"Encapsulated Postscript (*.eps)|*.eps|" \
"Enhanced Metafile (*.emf)|*.emf|" \
"Portable Document Format (*.pdf)|*.pdf|" \
"Postscript (*.ps)|*.ps|" \
"Raw RGBA bitmap (*.raw *.rgba)|*.raw;*.rgba|" \
"Scalable Vector Graphics (*.svg *.svgz)|*.svg;*.svgz",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
#If OK save the figure
if dlg.ShowModal() == wx.ID_OK:
self.SetStatusText("Saving figure...") #Change the statusbar
IDPlot = self.noteBook.GetSelection()
self.noteBook.GetPage(IDPlot).Save(dlg.GetPath())
self.SetStatusText("") #Change the statusbar
dlg.Destroy() #Destroy the popup window
def OnMenuAdd(self, event):
#Add a new list for the plot variables connect to the plot
self.PlotVariables.append([])
self.PlotIndex += 1
#Add a new plot panel to the notebook
self.plotPanels.append(PlotPanel(self.noteBook,self.grid,move=self.move, zoom=self.zoom))
self.noteBook.AddPage(self.plotPanels[-1],"Plot "+str(self.PlotIndex+1))
#Enable labels and axis options
self.editAxisLabels.Enable(True)
def OnMenuClear(self, event):
#Clear the current activated plot window
IDPlot = self.noteBook.GetSelection()
if IDPlot != -1:
plotWindow = self.noteBook.GetPage(IDPlot)
#Uncheck all variables
for i,var in enumerate(self.noteBook.GetPage(IDPlot).GetPlotVariables()):
self.tree.CheckItem2(var[1],checked=False,torefresh=True)
#Delete all variables
plotWindow.DeleteAllPlotVariables()
#Disable Lines and Legends
self.editLinesLegends.Enable(False)
plotWindow.SetDefaultSettings()
plotWindow.UpdateSettings(axes=[0.0,1.0,0.0,1.0])
plotWindow.Draw()
plotWindow.UpdateSettings(axes=[None,None,None,None])
def OnMenuLinesLegends(self, event):
IDPlot = self.noteBook.GetSelection()
plotWindow = self.noteBook.GetPage(IDPlot)
#Create the axis dialog
dlg = DialogLinesLegends(self,self.noteBook.GetPage(IDPlot))
#Open the dialog and update options if OK
if dlg.ShowModal() == wx.ID_OK:
dlg.ApplyChanges() #Apply Changes
legend = dlg.GetValues()
plotWindow.UpdateSettings(legendposition=legend)
plotWindow.Draw()
#Destroy the dialog
dlg.Destroy()
def OnMenuAxisLabels(self, event):
IDPlot = self.noteBook.GetSelection()
plotWindow = self.noteBook.GetPage(IDPlot)
#Create the axis dialog
dlg = DialogAxisLabels(self,self.noteBook.GetPage(IDPlot))
#Open the dialog and update options if OK
if dlg.ShowModal() == wx.ID_OK:
xmax,xmin,ymax,ymin,title,xlabel,ylabel,xscale,yscale = dlg.GetValues()
try:
xmax=float(xmax)
except ValueError:
xmax=None
try:
xmin=float(xmin)
except ValueError:
xmin=None
try:
ymax=float(ymax)
except ValueError:
ymax=None
try:
ymin=float(ymin)
except ValueError:
ymin=None
plotWindow.UpdateSettings(axes=[xmin,xmax,ymin,ymax],
title=title,xlabel=xlabel,ylabel=ylabel,
xscale=xscale, yscale=yscale)
plotWindow.DrawSettings()
#Destroy the dialog
dlg.Destroy()
def OnMenuGrid(self, event):
self.grid = not self.grid
for i in range(self.noteBook.GetPageCount()):
self.noteBook.GetPage(i).UpdateSettings(grid = self.grid)
self.noteBook.GetPage(i).DrawSettings()
def OnTreeItemChecked(self, event):
self.SetStatusText("Drawing figure...")
item = event.GetItem()
#ID = self.tree.FindIndexParent(item)
ID = -1 #Not used
IDPlot = self.noteBook.GetSelection()
if IDPlot != -1: #If there exist a plot window
data = self.tree.GetPyData(item)
#print "Variable: ", data["variable_id"], item
#Store plot variables or "unstore"
if self.tree.IsItemChecked(item): #Draw
#Add to Plot panel
self.noteBook.GetPage(IDPlot).AddPlotVariable(ID,item,data)
data["item_checked"] = IDPlot
else: #Undraw
#Remove from panel
self.noteBook.GetPage(IDPlot).DeletePlotVariable(data["variable_id"])
data["item_checked"] = None
self.noteBook.GetPage(IDPlot).Draw()
lines = self.noteBook.GetPage(IDPlot).GetLines()
if len(lines) != 0:
#Enable Lines and Legends
self.editLinesLegends.Enable(True)
else:
#Disable Lines and Legends
self.editLinesLegends.Enable(False)
else: #Dont allow an item to be checked if there exist no plot window
self.tree.CheckItem2(item,checked=False,torefresh=True)
self.SetStatusText("")
def OnKeyPress(self, event):
keycode = event.GetKeyCode() #Get the key pressed
#If the key is Delete
if keycode == wx.WXK_DELETE:
self.SetStatusText("Deleting Result...")
ID = self.tree.FindIndexParent(self.tree.GetSelection())
data = self.tree.GetPyData(self.tree.GetSelection())
IDPlot = self.noteBook.GetSelection()
if ID >= 0: #If id is less then 0, no item is selected
self.ResultFiles.pop(ID) #Delete the result object from the list
self.tree.DeleteParent(self.tree.GetSelection())
#Redraw
for i in range(self.noteBook.GetPageCount()):
self.noteBook.GetPage(i).DeletePlotVariable(global_id=data["result_id"])
self.noteBook.GetPage(i).Draw()
self.SetStatusText("")
def OnCloseTab(self, event):
self.OnTabChanging(event)
self.PlotVariables.pop(event.GetSelection()) #Delete the plot
self.plotPanels.pop(event.GetSelection()) #MAYBE!
#variables associated with the current plot
#Disable changing of labels and axis if there is no Plot
if self.noteBook.GetPageCount() == 1:
self.editAxisLabels.Enable(False)
self.editLinesLegends.Enable(False)
def OnTabChanging(self, event):
#print "Changing: ", self.noteBook.GetSelection()
self.UpdateCheckedItemTree(check=False)
def OnTabChanged(self,event):
#print "Changed: ", self.noteBook.GetSelection()
self.UpdateCheckedItemTree(check=True)
def UpdateCheckedItemTree(self, check=True):
#print "Update: ", self.noteBook.GetSelection()
IDPlot = self.noteBook.GetSelection()
#Check the items related to the previous plot
if IDPlot != -1:
for i,var in enumerate(self.noteBook.GetPage(IDPlot).GetPlotVariables()):
self.tree.CheckItem2(var[1],checked=check,torefresh=True)
lines = self.noteBook.GetPage(IDPlot).GetLines()
if len(lines) != 0:
#Enable Lines and Legends
self.editLinesLegends.Enable(True)
else:
#Disable Lines and Legends
self.editLinesLegends.Enable(False)
def OnMenuLicense(self, event):
desc = "Copyright (C) 2011 Modelon AB\n\n"\
"This program is free software: you can redistribute it and/or modify "\
"it under the terms of the GNU Lesser General Public License as published by "\
"the Free Software Foundation, version 3 of the License.\n\n"\
"This program is distributed in the hope that it will be useful, "\
"but WITHOUT ANY WARRANTY; without even the implied warranty of "\
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the "\
"GNU Lesser General Public License for more details.\n\n"\
"You should have received a copy of the GNU Lesser General Public License "\
"along with this program. If not, see <http://www.gnu.org/licenses/>. "
dlg = wx.MessageDialog(self, desc, 'License', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def _OpenFile(self,filename):
# Extract filename and result name
n = str(filename)
if n.find('\\') > n.find('/'):
res_name = n.split('\\')[-1]
else:
res_name = n.split('/')[-1]
failToLoad = False
self.SetStatusText("Loading "+n+"...") #Change the statusbar
#Find out if the result is a textual or binary file
if n.lower().endswith(".txt"): #Textual file
try:
self.ResultFiles.append((res_name,ResultDymolaTextual(n)))
except (JIOError, IOError):
self.SetStatusText("Could not load "+n+".") #Change the statusbar
failToLoad = True
elif n.lower().endswith(".mat"): #Binary file
try:
self.ResultFiles.append((res_name,ResultDymolaBinary(n)))
except (TypeError, IOError):
self.SetStatusText("Could not load "+n+".") #Change the statusbar
failToLoad = True
elif n.lower().endswith(".csv"): #Binary file
try:
self.ResultFiles.append((res_name,ResultCSVTextual(n)))
except (TypeError, IOError, ValueError):
self.SetStatusText("Could not load "+n+". Trying with delimiter ','.") #Change the statusbar
try:
self.ResultFiles.append((res_name,ResultCSVTextual(n,delimiter=",")))
except (TypeError, IOError, ValueError):
self.SetStatusText("Could not load "+n+".") #Change the statusbar
failToLoad = True
else:
self.SetStatusText("Could not load "+n+".") #Change the statusbar
failToLoad = True
if failToLoad:
self.SetStatusText("Could not open file '" + n + "'!\n")
else:
self.SetStatusText("Populating tree for " +n+"...")
self.tree.AddTreeNode(self.ResultFiles[-1][1], self.ResultFiles[-1][0],
self.filterPanel.checkBoxTimeVarying.GetValue(),
self.filterPanel.checkBoxParametersConstants.GetValue(),
self.filterPanel.GetFilter())
self.ResultIndex += 1 #Increment the index
self.SetStatusText("") #Change the statusbar
class VariableTree(wxCustom.CustomTreeCtrl):
def __init__(self, noteBook, *args, **kwargs):
super(VariableTree, self).__init__(*args, **kwargs)
self.noteBook = noteBook
#Add the root item
self.root = self.AddRoot("Result(s)")
#Root have children
self.SetItemHasChildren(self.root)
#Internal counter for all children
self.global_id = 0 #Global ID for each loaded results (unique for each results
self.local_id = 0 #Local ID for each loaded variable (unique for each variable and results)
self.node_id = 0 #Node ID for each node with childrens
#List of hidden children
self.hidden_children = []
self.hidden_nodes = {}
self.nodes = {}
#Internal flags
self._update_top_siblings = True
def RefreshSelectedUnder(self, item):
"""
Refreshes the selected items under the given item.
:param `item`: an instance of L{GenericTreeItem}.
"""
if self._freezeCount:
return
if item.IsSelected():
self.RefreshLine(item)
children = item.GetChildren()
for child in children:
if child.IsSelected():
self.RefreshLine(child)
# Workaround for bug in customtreectrl in wx making
# this function not work there
def SortChildren(self, item):
children = item.GetChildren()
if len(children) > 1:
self._dirty = True
from functools import cmp_to_key
children.sort(key=cmp_to_key(self.OnCompareItems))
def AddTreeNode(self, resultObject, name,timeVarying=None,parametersConstants=None,filter=None):
        #Freeze the window temporarily
self.Freeze()
#Add a new dictionary for the nodes
self.nodes[self.global_id] = {}
self._update_top_siblings = True
child = self.AppendItem(self.root, name, data={"result_id":self.global_id, "node_id": self.node_id})
self.SetItemHasChildren(child,True)
self.nodes[self.global_id][self.node_id] = {"node":child, "node_id":self.node_id, "name":name, "parent_node":self.root, "parent_node_id": -1}
self.nodes[self.global_id][-1] = {"node":child, "node_id":self.node_id, "name":name, "parent_node":self.root, "parent_node_id": -2}
self.node_id = self.node_id + 1 #Increment the nodes
rec = {"root":child}
for item in resultObject.name:
spl = item.split(".")
#Python object for storing data related to the variable
data={}
data["timevarying"] = None #resultObject.is_variable(item)
#data["traj"] = resultObject.get_variable_data(item)
data["traj"] = resultObject
data["name"] = item
data["full_name"] = item
data["result_id"] = self.global_id
data["variable_id"] = self.local_id = self.local_id + 1
data["result_object"] = resultObject
data["item_checked"] = None
if len(spl)==1:
data["parents"] = child
data["child"] = item
data["node_id"] = self.GetPyData(child)["node_id"]
self.AppendItem(child, item,ct_type=1, data=data)
else:
#Handle variables of type der(---.---.x)
if spl[0].startswith("der(") and spl[-1].endswith(")"):
spl[0]=spl[0][4:]
spl[-1] = "der("+spl[-1]
tmp_str = ""
tmp_str_old = ""
for i in range(len(spl)-1):
#See if the sub directory already been added, else add
tmp_str_old = tmp_str
tmp_str += spl[i]
try:
rec[tmp_str]
except KeyError:
local_data = {"result_id":self.global_id, "node_id":self.node_id}
if i==0:
rec[tmp_str] = self.AppendItem(child, spl[i], data=local_data)
local_dict = {"node":rec[tmp_str], "node_id":self.node_id, "name":spl[i], "parent_node":child, "parent_node_id": -1}
self.nodes[self.global_id][self.node_id] = local_dict
else:
rec[tmp_str] = self.AppendItem(rec[tmp_str_old], spl[i], data=local_data)
local_dict = {"node":rec[tmp_str], "node_id":self.node_id, "name":spl[i], "parent_node":rec[tmp_str_old], "parent_node_id": self.GetPyData(rec[tmp_str_old])["node_id"]}
self.nodes[self.global_id][self.node_id] = local_dict
self.SetItemHasChildren(rec[tmp_str],True)
self.node_id = self.node_id + 1 #Increment the nodes
else:
data["parents"] = rec[tmp_str]
data["child"] = spl[-1]
data["node_id"] = self.GetPyData(rec[tmp_str],)["node_id"]
self.AppendItem(rec[tmp_str], spl[-1], ct_type=1, data=data)
self.SortChildren(child)
#Increment global id
self.global_id = self.global_id + 1
#print "Adding: ", name, "Options: ", timeVarying, parametersConstants, filter
#print "Condition: ", timeVarying == False or parametersConstants == False or filter != None
#Hide nodes if options are choosen
if timeVarying == False or parametersConstants == False or filter != None:
self.HideNodes(timeVarying,parametersConstants,filter)
#Un-Freeze the window
self.Thaw()
def FindLoneChildDown(self, child):
"""
Search for the youngest child down the tree from "child".
Parameters::
child - The item from where the search should start.
Returns::
child - The youngest child from the starting point.
"""
while True:
nextItem,cookie = self.GetNextChild(child,0)
if nextItem != None:
child = nextItem
else:
break
return child
def FindFirstSiblingUp(self,child,itemParent):
"""
Search for the first sibling of "child" going up in tree.
"""
while child != itemParent:
nextItem = self.GetNextSibling(child)
if nextItem != None:
return nextItem
child = self.GetItemParent(child)
return child
def HideNodes(self, showTimeVarying=None, showParametersConstants=None, filter=None):
"""
Hide nodes depending on the input.
Parameters::
showTimeVarying - Hides or Shows the time varying variables.
            showParametersConstants - Hides or Shows the parameters.
"""
itemParent = self.GetRootItem()
child,cookie = self.GetFirstChild(itemParent)
found_child = child
top_siblings = self.FindTopSiblings()
#Hide items if any of the options are True
if showTimeVarying == False or showParametersConstants == False or filter != None:
while child != itemParent and child != None:
already_hidden = False
#Find the first youngest child
found_child = self.FindLoneChildDown(child)
#Find the first sibling up
child = self.FindFirstSiblingUp(found_child, itemParent)
data = self.GetPyData(found_child)
if found_child in top_siblings:
print("Found child in top siblings, ", self.GetItemText(found_child))
continue
#print "Found child:", self.GetItemText(found_child)
#print "Child: ", self.GetItemText(child), self.GetPyData(child), "Has Children: ", self.HasChildren(child)
if data == None:
print("Found (wrong) child:", self.GetItemText(found_child))
raise Exception
try:
data["timevarying"]
except KeyError:
print("Found (wrong (exception)) child:", self.GetItemText(found_child))
raise Exception
if data["timevarying"] == None:
data["timevarying"] = data["result_object"].is_variable(data["full_name"])
#Enable or disable depending on input to method
if showTimeVarying == False and data["timevarying"]:
self.HideItem(found_child, showTimeVarying)
#Delete the parent if it has no children
self.HideNodeItem(found_child)
already_hidden = True
if showParametersConstants == False and not data["timevarying"]:
self.HideItem(found_child, showParametersConstants)
#Delete the parent if it has no children
self.HideNodeItem(found_child)
already_hidden = True
if not already_hidden and filter != None and not match(data["full_name"], filter):
self.HideItem(found_child, show=False)
#Delete the parent if it has no children
self.HideNodeItem(found_child)
#Re-add items if any of the options are True
if showTimeVarying == True or showParametersConstants == True or filter != None:
self.AddHiddenItems(showTimeVarying, showParametersConstants, filter)
def FindTopSiblings(self):
"""
Finds all the siblings one level down from root.
"""
if self._update_top_siblings:
itemParent = self.GetRootItem()
child,cookie = self.GetFirstChild(itemParent)
siblings = []
while child != None:
siblings.append(child)
child = self.GetNextSibling(child)
self._top_siblings = siblings
else:
siblings = self._top_siblings
self._update_top_siblings = False
return siblings
def AddHiddenItems(self, showTimeVarying=None, showParametersConstants=None, filter=None):
#print "Adding hidden items: ", showTimeVarying, showParametersConstants, filter
i = 0
while i < len(self.hidden_children):
data = self.hidden_children[i]
matching = False
#Do not add any items!
if data["timevarying"] and showTimeVarying == False or not data["timevarying"] and showParametersConstants == False:
i = i+1
continue
if filter != None:
matching = match(data["full_name"], filter)
if data["timevarying"] and showTimeVarying == True and (filter == None or filter != None and matching == True) or \
not data["timevarying"] and showParametersConstants == True and (filter == None or filter != None and matching == True):
#or filter != None and match(data["full_name"], filter):
if self.nodes[data["result_id"]][data["node_id"]]["node"] == None:
self.AddHiddenNodes(data)
#print "Adding: ", data
#print "At node: ", self.nodes[data["result_id"]][data["node_id"]]
item = self.AppendItem(self.nodes[data["result_id"]][data["node_id"]]["node"], data["child"],ct_type=1, data=data)
if item == None:
raise Exception("Something went wrong when adding the variable.")
if data["item_checked"] is not None: #Item was previously checked.
#print "Item was previously checked", data["item_checked"]
self.noteBook.GetPage(data["item_checked"]).UpdatePlotVariableReference(-1,item,data)
self.hidden_children.pop(i)
i = i-1
i = i+1
def AddHiddenNodes(self, data):
node = self.nodes[data["result_id"]][data["node_id"]]
nodes_to_be_added = [node]
while node["node"] == None and node["parent_node_id"] != -1:
node = self.nodes[data["result_id"]][node["parent_node_id"]]
if node["node"] != None:
break
nodes_to_be_added.append(node)
#print "Nodes to be added: ", nodes_to_be_added
for i in range(len(nodes_to_be_added)):
node = nodes_to_be_added[-(i+1)]
#print "Adding node: ", node, " at ", self.nodes[data["result_id"]][node["parent_node_id"]], " or ", self.nodes[data["result_id"]][-1], data
local_data = {"result_id":data["result_id"], "node_id":node["node_id"]}
"""
if node["parent_node_id"] == -1:
item = self.AppendItem(self.nodes[data["result_id"]][-1], node["name"], data=local_data)
else:
item = self.AppendItem(node["parent_node_id"], node["name"], data=local_data)
"""
item = self.AppendItem(self.nodes[data["result_id"]][node["parent_node_id"]]["node"], node["name"], data=local_data)
#item = self.AppendItem(node["parent_node"], node["name"], data=local_data)
self.SetItemHasChildren(item, True)
self.nodes[data["result_id"]][node["node_id"]]["node"] = item
#print "Node info after adding: ", self.nodes[data["result_id"]][node["node_id"]]
def HideNodeItem(self, item):
"""
        Deletes the parents that do not have any children
"""
parent = self.GetItemParent(item)
top_siblings = self.FindTopSiblings()
while self.HasChildren(parent) == False and parent not in top_siblings:
old_parent = self.GetItemParent(parent)
#Add the deleted nodes to the hidden list so that we can recreate the list
#self.hidden_nodes.append(self.GetPyData(parent))
#self.hidden_nodes[self.GetPyData(parent)["node_id"]] = [self.GetPyData(parent), old_parent]
#self.nodes[self.GetPyData(parent)["result_id"]][self.GetPyData(parent)["node_id"]][0] = None
self.nodes[self.GetPyData(parent)["result_id"]][self.GetPyData(parent)["node_id"]]["node"] = None
self.Delete(parent)
parent = old_parent
def HideItem(self, item, show):
data = self.GetPyData(item)
if not show:
self.hidden_children.append(data)
self.Delete(item)
def DeleteParent(self, item):
"""
Delete the oldest parent of item, except root.
"""
if item == self.GetRootItem():
return False
parentItem = self.GetItemParent(item)
while parentItem != self.GetRootItem():
item = parentItem
parentItem = self.GetItemParent(item)
#Remove also the hidden items contained in the hidden children list
data = self.GetPyData(item)
i = 0
while i < len(self.hidden_children):
if self.hidden_children[i]["result_id"] == data["result_id"]:
self.hidden_children.pop(i)
i = i-1
i = i+1
#Delete hidden nodes
self.nodes.pop(data["result_id"])
self.Delete(item) #Delete the parent from the Tree
def FindIndexParent(self, item):
"""
Find the index of the oldest parent of item from one level down
from root.
"""
if item == self.GetRootItem():
return -1
parentItem = item
item = self.GetItemParent(parentItem)
while item != self.GetRootItem():
parentItem = item
item = self.GetItemParent(parentItem)
root = self.GetRootItem()
sibling,cookie = self.GetFirstChild(root)
index = 0
while parentItem != sibling:
sibling = self.GetNextSibling(sibling)
index += 1
return index
class DialogLinesLegends(wx.Dialog):
def __init__(self, parent, plotPage):
wx.Dialog.__init__(self, parent, -1, "Lines and Legends")
#Get the variables
self.variables = plotPage.GetPlotVariables()
#Get the settings
settings = plotPage.GetSettings()
#Get the lines
lines = plotPage.GetLines()
#First line
line1 = lines[0]
names = [i[2]["name"] for i in self.variables]
lineStyles = ["-","--","-.",":"]
colors = ["Auto","Blue","Green","Red","Cyan","Magenta","Yellow","Black","White"]
lineStylesNames = ["Solid","Dashed","Dash Dot","Dotted"]
markerStyles = ["None",'D','s','_','^','d','h','+','*',',','o','.','p','H','v','x','>','<']
markerStylesNames = ["None","Diamond","Square","Horizontal Line","Triangle Up","Thin Diamond","Hexagon 1","Plus","Star","Pixel","Circle",
"Point","Pentagon","Hexagon 2", "Triangle Down", "X", "Triangle Right", "Triangle Left"]
legendPositions = ['Hide','Best','Upper Right','Upper Left','Lower Left','Lower Right','Right','Center Left','Center Right','Lower Center','Upper Center','Center']
self.lineStyles = lineStyles
self.markerStyles = markerStyles
self.colors = colors
#Create the legend dict from where to look up positions
self.LegendDict = dict((item,i) for i,item in enumerate(legendPositions[1:]))
self.LegendDict["Hide"] = -1
#Create the line style dict
self.LineStyleDict = dict((item,i) for i,item in enumerate(lineStyles))
#Create the marker dict
self.MarkerStyleDict = dict((item,i) for i,item in enumerate(markerStyles))
#Create the color dict
self.ColorsDict = dict((item,i) for i,item in enumerate(colors))
mainSizer = wx.BoxSizer(wx.VERTICAL)
bagSizer = wx.GridBagSizer(11, 11)
plotLabelStatic = wx.StaticText(self, -1, "Label")
plotStyleStatic = wx.StaticText(self, -1, "Style")
plotMarkerStyleStatic = wx.StaticText(self, -1, "Style")
plotLineStatic = wx.StaticText(self, -1, "Line")
plotMarkerStatic = wx.StaticText(self, -1, "Marker")
plotLegendStatic = wx.StaticText(self, -1, "Legend")
plotPositionStatic = wx.StaticText(self, -1, "Position")
plotWidthStatic = wx.StaticText(self, -1, "Width")
plotColorStatic = wx.StaticText(self, -1, "Color")
plotMarkerSizeStatic = wx.StaticText(self, -1, "Size")
sizeWidth = 170
#Set the first line as default
self.plotLines = wx.ComboBox(self, -1, size=(220, -1), choices=names, style=wx.CB_READONLY)
self.plotLines.SetSelection(0)
#Set the first line as default
self.plotLineStyle = wx.ComboBox(self, -1, size=(sizeWidth, -1), choices=lineStylesNames, style=wx.CB_READONLY)
self.plotMarkerStyle = wx.ComboBox(self, -1, size=(sizeWidth, -1), choices=markerStylesNames, style=wx.CB_READONLY)
#Set the first label as default
self.plotLineName = wx.TextCtrl(self, -1, "", style = wx.TE_LEFT , size =(sizeWidth,-1))
self.plotWidth = wx.TextCtrl(self, -1, "", style = wx.TE_LEFT, size=(sizeWidth,-1))
self.plotMarkerSize = wx.TextCtrl(self, -1, "", style = wx.TE_LEFT, size=(sizeWidth,-1))
self.plotColor = wx.ComboBox(self, -1, choices=colors, size=(sizeWidth,-1),style=wx.CB_READONLY)
#Define the legend
self.plotLegend = wx.ComboBox(self, -1, size=(sizeWidth, -1), choices=legendPositions, style=wx.CB_READONLY)
self.plotLegend.SetSelection(plotPage.GetLegendLocation()+1)
#Get the FONT
font = plotLineStatic.GetFont()
font.SetWeight(wx.BOLD)
#Set the bold font to the sections
plotLineStatic.SetFont(font)
plotMarkerStatic.SetFont(font)
plotLegendStatic.SetFont(font)
bagSizer.Add(self.plotLines,(0,0),(1,2))
bagSizer.Add(plotLabelStatic,(1,0))
bagSizer.Add(self.plotLineName,(1,1))
bagSizer.Add(plotLineStatic,(2,0),(1,1))
bagSizer.Add(plotStyleStatic,(3,0))
bagSizer.Add(self.plotLineStyle,(3,1))
bagSizer.Add(plotWidthStatic,(4,0))
bagSizer.Add(self.plotWidth,(4,1))
bagSizer.Add(plotColorStatic,(5,0))
bagSizer.Add(self.plotColor,(5,1))
bagSizer.Add(plotMarkerStatic,(6,0),(1,1))
bagSizer.Add(plotMarkerStyleStatic,(7,0))
bagSizer.Add(self.plotMarkerStyle,(7,1))
bagSizer.Add(plotMarkerSizeStatic,(8,0))
bagSizer.Add(self.plotMarkerSize,(8,1))
bagSizer.Add(plotLegendStatic,(9,0),(1,1))
bagSizer.Add(plotPositionStatic,(10,0))
bagSizer.Add(self.plotLegend,(10,1))
#Create OK,Cancel and Apply buttons
self.buttonOK = wx.Button(self, wx.ID_OK)
self.buttonCancel = wx.Button(self, wx.ID_CANCEL)
self.buttonApply = wx.Button(self, wx.ID_APPLY)
buttonSizer = wx.StdDialogButtonSizer()
buttonSizer.AddButton(self.buttonOK)
buttonSizer.AddButton(self.buttonCancel)
buttonSizer.AddButton(self.buttonApply)
buttonSizer.Realize()
#Add information to the sizers
mainSizer.Add(bagSizer,0,wx.ALL|wx.EXPAND,20)
mainSizer.Add(buttonSizer,1,wx.ALL|wx.EXPAND,10)
#Set the main sizer to the panel
self.SetSizer(mainSizer)
#Set size
mainSizer.Fit(self)
#Set the first line as default
self.ChangeLine(self.variables[0])
#Bind events
self.Bind(wx.EVT_COMBOBOX, self.OnLineChange)
self.buttonApply.Bind(wx.EVT_BUTTON, self.OnApply)
def OnApply(self, event):
self.ApplyChanges()
def OnLineChange(self, event):
if self.plotLines.FindFocus() == self.plotLines:
ID = self.plotLines.GetSelection()
self.ChangeLine(self.variables[ID])
def ApplyChanges(self):
ID = self.plotLines.GetSelection()
self.variables[ID][3].name = self.plotLineName.GetValue()
self.variables[ID][3].style = self.lineStyles[self.plotLineStyle.GetSelection()]
self.variables[ID][3].width = float(self.plotWidth.GetValue())
self.variables[ID][3].color = None if self.plotColor.GetSelection()==0 else self.colors[self.plotColor.GetSelection()].lower()
self.variables[ID][3].marker = self.markerStyles[self.plotMarkerStyle.GetSelection()]
self.variables[ID][3].markersize = float(self.plotMarkerSize.GetValue())
def ChangeLine(self, var):
self.plotLineStyle.SetSelection(self.LineStyleDict[var[3].style])
self.plotMarkerStyle.SetSelection(self.MarkerStyleDict[var[3].marker])
self.plotLineName.SetValue(var[3].name)
self.plotWidth.SetValue(str(var[3].width))
self.plotMarkerSize.SetValue(str(var[3].markersize))
if var[3].color == None:
self.plotColor.SetSelection(0)
else:
self.plotColor.SetSelection(self.ColorsDict[var[3].color[0].upper()+var[3].color[1:]])
def GetValues(self):
return self.LegendDict[self.plotLegend.GetValue()]
class DialogAxisLabels(wx.Dialog):
def __init__(self, parent, plotPage):
wx.Dialog.__init__(self, parent, -1, "Axis and Labels")
settings = plotPage.GetSettings()
plotXAxisStatic = wx.StaticText(self, -1, "X-Axis")
plotYAxisStatic = wx.StaticText(self, -1, "Y-Axis")
plotXMaxStatic = wx.StaticText(self, -1, "Max",size =(50,-1))
plotXMinStatic = wx.StaticText(self, -1, "Min",size =(50,-1))
plotTitleStatic = wx.StaticText(self, -1, "Title")
plotXLabelStatic = wx.StaticText(self, -1, "Label")
plotXScaleStatic = wx.StaticText(self, -1, "Scale")
plotYMaxStatic = wx.StaticText(self, -1, "Max",size =(50,-1))
plotYMinStatic = wx.StaticText(self, -1, "Min",size =(50,-1))
plotYLabelStatic = wx.StaticText(self, -1, "Label")
plotYScaleStatic = wx.StaticText(self, -1, "Scale")
font = plotXAxisStatic.GetFont()
font.SetWeight(wx.BOLD)
plotXAxisStatic.SetFont(font)
plotYAxisStatic.SetFont(font)
self.plotYAxisMin = wx.TextCtrl(self, -1, "" if settings["YAxisMin"]==None else str(settings["YAxisMin"]), style = wx.TE_LEFT , size =(150,-1))
self.plotYAxisMax = wx.TextCtrl(self, -1, "" if settings["YAxisMax"]==None else str(settings["YAxisMax"]), style = wx.TE_LEFT , size =(150,-1))
self.plotXAxisMin = wx.TextCtrl(self, -1, "" if settings["XAxisMin"]==None else str(settings["XAxisMin"]), style = wx.TE_LEFT , size =(150,-1))
self.plotXAxisMax = wx.TextCtrl(self, -1, "" if settings["XAxisMax"]==None else str(settings["XAxisMax"]), style = wx.TE_LEFT , size =(150,-1))
self.plotTitle = wx.TextCtrl(self, -1, settings["Title"], style = wx.TE_LEFT , size =(150,-1))
self.plotXLabel = wx.TextCtrl(self, -1, settings["XLabel"], style = wx.TE_LEFT , size =(150,-1))
self.plotYLabel = wx.TextCtrl(self, -1, settings["YLabel"], style = wx.TE_LEFT , size =(150,-1))
self.plotXScale = wx.ComboBox(self, -1, size=(150, -1), choices=["Linear","Log"], style=wx.CB_READONLY)
self.plotYScale = wx.ComboBox(self, -1, size=(150, -1), choices=["Linear","Log"], style=wx.CB_READONLY)
self.plotXScale.SetSelection(0 if settings["XScale"]=="Linear" else 1)
self.plotYScale.SetSelection(0 if settings["YScale"]=="Linear" else 1)
mainSizer = wx.BoxSizer(wx.VERTICAL)
bagSizer = wx.GridBagSizer(10, 10)
bagSizer.Add(plotTitleStatic,(0,0))
bagSizer.Add(self.plotTitle, (0,1))
bagSizer.Add(plotXAxisStatic,(1,0),(1,1))
bagSizer.Add(plotXMinStatic,(2,0))
bagSizer.Add(self.plotXAxisMin,(2,1))
bagSizer.Add(plotXMaxStatic,(3,0))
bagSizer.Add(self.plotXAxisMax,(3,1))
bagSizer.Add(plotXLabelStatic,(4,0))
bagSizer.Add(self.plotXLabel,(4,1))
bagSizer.Add(plotXScaleStatic,(5,0))
bagSizer.Add(self.plotXScale,(5,1))
bagSizer.Add(plotYAxisStatic,(6,0),(1,1))
bagSizer.Add(plotYMinStatic,(7,0))
bagSizer.Add(self.plotYAxisMin,(7,1))
bagSizer.Add(plotYMaxStatic,(8,0))
bagSizer.Add(self.plotYAxisMax,(8,1))
bagSizer.Add(plotYLabelStatic,(9,0))
bagSizer.Add(self.plotYLabel,(9,1))
bagSizer.Add(plotYScaleStatic,(10,0))
bagSizer.Add(self.plotYScale,(10,1))
bagSizer.AddGrowableCol(1)
#Create OK and Cancel buttons
buttonSizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
#Add information to the sizers
mainSizer.Add(bagSizer,0,wx.ALL|wx.EXPAND,20)
mainSizer.Add(buttonSizer,1,wx.ALL|wx.EXPAND,10)
#Set the main sizer to the panel
self.SetSizer(mainSizer)
#Set size
mainSizer.Fit(self)
def GetValues(self):
xmax = self.plotXAxisMax.GetValue()
xmin = self.plotXAxisMin.GetValue()
ymax = self.plotYAxisMax.GetValue()
ymin = self.plotYAxisMin.GetValue()
title = self.plotTitle.GetValue()
xlabel = self.plotXLabel.GetValue()
ylabel = self.plotYLabel.GetValue()
xscale = self.plotXScale.GetValue()
yscale = self.plotYScale.GetValue()
return xmax,xmin,ymax,ymin,title, xlabel, ylabel, xscale, yscale
class FilterPanel(wx.Panel):
def __init__(self, main, parent,tree, **kwargs):
wx.Panel.__init__( self, parent, **kwargs )
#Store the parent
self.main = main
self.parent = parent
self.tree = tree
self.active_filter = False
mainSizer = wx.BoxSizer(wx.VERTICAL)
topBox = wx.StaticBox(self, label = "Filter")
topSizer = wx.StaticBoxSizer(topBox, wx.VERTICAL)
flexGrid = wx.FlexGridSizer(3, 1, 0, 10)
#Create the checkboxes
self.checkBoxParametersConstants = wx.CheckBox(self, -1, " Parameters / Constants")#, size=(140, -1))
self.checkBoxTimeVarying = wx.CheckBox(self, -1, " Time-Varying", size=(140, -1))
self.searchBox = wx.SearchCtrl(self, -1, "Search", size=(190, -1), style=wx.TE_PROCESS_ENTER)
        self.searchBox.SetToolTipString("Filter the variables using unix filename pattern matching \n" \
'(eg. "*der*"). Can also be a list of filters separated by ";"\n' \
"See http://docs.python.org/2/library/fnmatch.html.")
#Check the checkboxes
self.checkBoxParametersConstants.SetValue(True)
self.checkBoxTimeVarying.SetValue(True)
#Add the checkboxes to the flexgrid
flexGrid.Add(self.checkBoxParametersConstants)
flexGrid.Add(self.checkBoxTimeVarying)
flexGrid.Add(self.searchBox)
flexGrid.AddGrowableCol(0)
#Add information to the sizers
topSizer.Add(flexGrid,1,wx.ALL|wx.EXPAND,10)
mainSizer.Add(topSizer,0,wx.EXPAND|wx.ALL,10)
#Set the main sizer to the panel
self.SetSizer(mainSizer)
#Bind events
self.Bind(wx.EVT_CHECKBOX, self.OnParametersConstants, self.checkBoxParametersConstants)
self.Bind(wx.EVT_CHECKBOX, self.OnTimeVarying, self.checkBoxTimeVarying)
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch, self.searchBox)
self.Bind(wx.EVT_TEXT_ENTER, self.OnSearch, self.searchBox)
def GetFilter(self):
if self.active_filter == True:
filter = self.searchBox.GetValue().split(";")
if filter[0] == "": #If the filter is empty, match all
filter = ["*"]
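            #convert_filter is presumably provided elsewhere in this module's
            #namespace; it turns the fnmatch-style patterns into the form the
            #variable tree expects.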
filter_list = convert_filter(filter)
else:
filter_list = None
return filter_list
def OnSearch(self, event):
self.active_filter = True
self.tree.HideNodes(showTimeVarying=self.checkBoxTimeVarying.GetValue(), showParametersConstants=self.checkBoxParametersConstants.GetValue(), filter=self.GetFilter())
self.main.UpdateCheckedItemTree()
if self.searchBox.GetValue() == "":
self.active_filter = False
def OnParametersConstants(self, event):
self.tree.HideNodes(showTimeVarying=self.checkBoxTimeVarying.GetValue(), showParametersConstants=self.checkBoxParametersConstants.GetValue(), filter=self.GetFilter())
self.main.UpdateCheckedItemTree()
def OnTimeVarying(self, event):
self.tree.HideNodes(showTimeVarying=self.checkBoxTimeVarying.GetValue(), showParametersConstants=self.checkBoxParametersConstants.GetValue(), filter=self.GetFilter())
self.main.UpdateCheckedItemTree()
class Lines_Settings:
def __init__(self, name=None):
self.width = rcParams["lines.linewidth"]
self.style = rcParams["lines.linestyle"]
self.marker = rcParams["lines.marker"]
self.markersize = rcParams["lines.markersize"]
self.color = None
self.name = name
class PlotPanel(wx.Panel):
def __init__(self, parent, grid=False,move=True,zoom=False, **kwargs):
wx.Panel.__init__( self, parent, **kwargs )
#Initialize matplotlib
self.figure = Figure(facecolor = 'white')
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.subplot = self.figure.add_subplot( 111 )
self.parent = parent
self.settings = {}
self.settings["Grid"] = grid
self.settings["Zoom"] = zoom
self.settings["Move"] = move
self.SetDefaultSettings() #Set the default settings
self.plotVariables = []
self._resizeflag = False
self._SetSize()
self.DrawSettings()
#Bind events
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.Bind(wx.EVT_SIZE, self.OnSize)
#Bind event for resizing (must bind to canvas)
self.canvas.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.canvas.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.canvas.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.canvas.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.canvas.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
self.canvas.Bind(wx.EVT_MOTION, self.OnMotion)
self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.OnPass)
self._mouseLeftPressed = False
self._mouseMoved = False
def SetDefaultSettings(self):
self.settings["Title"] = ""
self.settings["XLabel"] = "Time [s]"
self.settings["YLabel"] = ""
self.settings["XAxisMax"] = None
self.settings["XAxisMin"] = None
self.settings["YAxisMax"] = None
self.settings["YAxisMin"] = None
self.settings["XScale"] = "Linear"
self.settings["YScale"] = "Linear"
self.settings["LegendPosition"] = 0 #"Best" position
def AddPlotVariable(self, ID, item, data):
lineSettings = Lines_Settings(data["name"])
self.plotVariables.append([ID,item,data,lineSettings])
def GetPlotVariables(self):
return self.plotVariables
def DeleteAllPlotVariables(self):
self.plotVariables = []
def UpdatePlotVariableReference(self, ID,item,data):
self.DeletePlotVariable(local_id=data["variable_id"])
self.AddPlotVariable(ID,item,data)
def DeletePlotVariable(self, local_id=None, global_id=None):
if local_id != None:
for i,var in enumerate(self.plotVariables):
if var[2]["variable_id"] == local_id:
self.plotVariables.pop(i)
break
if global_id != None:
j = 0
while j < len(self.plotVariables):
if self.plotVariables[j][2]["result_id"] == global_id:
self.plotVariables.pop(j)
j = j-1
j = j+1
if j==len(self.plotVariables):
break
def OnPass(self, event):
pass
def OnMotion(self, event):
if self._mouseLeftPressed: #Is the mouse pressed?
self._mouseMoved = True
self._newPos = event.GetPosition()
if self.settings["Move"]:
self.DrawMove()
if self.settings["Zoom"]:
self.DrawRectZoom()
def DrawZoom(self):
try:
y0 = self._figureMin[1][1]-self._lastZoomRect[1]
x0 = self._lastZoomRect[0]-self._figureMin[0][0]
w = self._lastZoomRect[2]
h = self._lastZoomRect[3]
fullW = self._figureMin[1][0]-self._figureMin[0][0]
fullH = self._figureMin[1][1]-self._figureMin[0][1]
if w < 0:
x0 = x0 + w
x0 = max(x0, 0.0)
y0 = max(y0, 0.0)
plotX0 = self.subplot.get_xlim()[0]
plotY0 = self.subplot.get_ylim()[0]
plotW = self.subplot.get_xlim()[1]-self.subplot.get_xlim()[0]
plotH = self.subplot.get_ylim()[1]-self.subplot.get_ylim()[0]
self.settings["XAxisMin"] = plotX0+abs(x0/fullW*plotW)
self.settings["XAxisMax"] = plotX0+abs(x0/fullW*plotW)+abs(w/fullW*plotW)
self.settings["YAxisMin"] = plotY0+abs(y0/fullH*plotH)
self.settings["YAxisMax"] = plotY0+abs(y0/fullH*plotH)+abs(h/fullH*plotH)
self.DrawRectZoom(drawNew=False) #Delete the last zoom rectangle
self.DrawSettings()
except AttributeError:
self.DrawRectZoom(drawNew=False) #Delete the last zoom rectangle
def DrawMove(self):
x0,y0 = self._originalPos
x1,y1 = self._newPos
fullW = self._figureMin[1][0]-self._figureMin[0][0]
fullH = self._figureMin[1][1]-self._figureMin[0][1]
plotX0,plotY0,plotW,plotH = self._plotInfo
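        #Pan the view: shift both axis limits by the mouse displacement,
        #converted from pixels to data units ((x0-x1)/fullW*plotW etc.).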
self.settings["XAxisMin"] = plotX0+(x0-x1)/fullW*plotW
self.settings["XAxisMax"] = plotX0+plotW+(x0-x1)/fullW*plotW
self.settings["YAxisMin"] = plotY0+(y1-y0)/fullH*plotH
self.settings["YAxisMax"] = plotY0+plotH+(y1-y0)/fullH*plotH
self.DrawSettings()
def DrawRectZoom(self, drawNew=True):
dc = wx.ClientDC(self.canvas)
dc.SetLogicalFunction(wx.XOR)
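        #With the XOR logical function, drawing the same rectangle twice
        #erases it, so the zoom rubber-band can be redrawn without
        #refreshing the whole canvas.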
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
y1 = min(max(self._newPos[1],self._figureMin[0][1]),self._figureMin[1][1])
y0 = min(max(self._originalPos[1],self._figureMin[0][1]),self._figureMin[1][1])
x1 = min(max(self._newPos[0],self._figureMin[0][0]),self._figureMin[1][0])
x0 = min(max(self._originalPos[0],self._figureMin[0][0]),self._figureMin[1][0])
if y1 > y0:
y0, y1 = y1, y0
        if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rectZoom = int(x0), int(y0), int(w), int(h)
try:
self._lastZoomRect
except AttributeError:
pass
else:
dc.DrawRectangle(*self._lastZoomRect) #Erase last
if drawNew:
self._lastZoomRect = rectZoom
dc.DrawRectangle(*rectZoom)
else:
try:
del self._lastZoomRect
except AttributeError:
pass
dc.EndDrawing()
#dc.Destroy()
def OnLeftDown(self, event):
self._mouseLeftPressed = True #Mouse is pressed
#Capture mouse position
self._originalPos = event.GetPosition()
#Capture figure size
self._figureRatio = self.subplot.get_position().get_points()
self._figureSize = (self.canvas.figure.bbox.width,self.canvas.figure.bbox.height)
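        #_figureMin stores the pixel coordinates of the axes bounding box
        #(lower-left and upper-right corners), used by the zoom/move handlers.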
self._figureMin = [(round(self._figureSize[0]*self._figureRatio[0][0]),round(self._figureSize[1]*self._figureRatio[0][1])),
(round(self._figureSize[0]*self._figureRatio[1][0]),round(self._figureSize[1]*self._figureRatio[1][1]))]
#Capture current plot
plotX0 = self.subplot.get_xlim()[0]
plotY0 = self.subplot.get_ylim()[0]
plotW = self.subplot.get_xlim()[1]-self.subplot.get_xlim()[0]
plotH = self.subplot.get_ylim()[1]-self.subplot.get_ylim()[0]
self._plotInfo = (plotX0, plotY0, plotW, plotH)
def OnLeftUp(self, event):
self._mouseLeftPressed = False #Mouse is not pressed
if self._mouseMoved:
self._mouseMoved = False
if self.settings["Zoom"]:
self.DrawZoom()
if self.settings["Move"]:
self.DrawMove()
def OnLeaveWindow(self, event): #Change cursor
if self._mouseLeftPressed:
self._mouseLeftPressed = False #Mouse not pressed anymore
self._mouseMoved = False
if self.settings["Zoom"]:
self.DrawZoom()
if self.settings["Move"]:
self.DrawMove()
def OnEnterWindow(self, event): #Change cursor
self.UpdateCursor()
def OnRightDown(self, event):
"""
On right click, resize the plot.
"""
self.ReSize()
def ReSize(self):
self.UpdateSettings(axes=[None,None,None,None])
self.DrawSettings()
def OnSize(self, event):
self._resizeflag = True
def OnIdle(self, event):
if self._resizeflag:
self._resizeflag = False
self._SetSize()
def _SetSize(self):
pixels = tuple(self.GetClientSize())
        #self.SetSize(pixels) #GENERATES INFINITELY MANY EVENTS ON UBUNTU
self.canvas.SetSize(pixels)
self.figure.set_size_inches(float(pixels[0])/self.figure.get_dpi(),
float(pixels[1])/self.figure.get_dpi())
#def Draw(self, variables=[]):
def Draw(self):
self.subplot.clear()
for i in self.plotVariables:
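            #Each entry is [ID, tree item, data dict, Lines_Settings]; the
            #trajectory object supplies the time vector (t) and values (x).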
traj = i[2]["traj"].get_variable_data(i[2]["full_name"])
if i[3].color is None:
#self.subplot.plot(i[2]["traj"].t, i[2]["traj"].x,label=i[3].name,linewidth=i[3].width,marker=i[3].marker,linestyle=i[3].style,markersize=i[3].markersize)
self.subplot.plot(traj.t, traj.x,label=i[3].name,linewidth=i[3].width,marker=i[3].marker,linestyle=i[3].style,markersize=i[3].markersize)
else:
#self.subplot.plot(i[2]["traj"].t, i[2]["traj"].x,label=i[3].name,linewidth=i[3].width,marker=i[3].marker,linestyle=i[3].style,markersize=i[3].markersize,color=i[3].color)
self.subplot.plot(traj.t, traj.x,label=i[3].name,linewidth=i[3].width,marker=i[3].marker,linestyle=i[3].style,markersize=i[3].markersize,color=i[3].color)
self.DrawSettings()
def GetLines(self):
return self.subplot.get_lines()
def GetLegendLocation(self):
res = self.subplot.get_legend()
if res is None:
return -1
else:
return res._loc
def Save(self, filename):
"""
Saves the current figure.
Parameters::
            filename - The name of the file to which the plot is saved.
"""
self.figure.savefig(filename)
def DrawSettings(self):
"""
Draws the current settings onto the Plot.
"""
self.subplot.grid(self.settings["Grid"])
#Draw label settings
self.subplot.set_title(self.settings["Title"])
self.subplot.set_xlabel(self.settings["XLabel"])
self.subplot.set_ylabel(self.settings["YLabel"])
#Draw Scale settings
self.subplot.set_xscale(self.settings["XScale"])
self.subplot.set_yscale(self.settings["YScale"])
if len(self.plotVariables) != 0 and self.settings["LegendPosition"] != -1:
self.subplot.legend(loc=self.settings["LegendPosition"])
#Draw axis settings
if self.settings["XAxisMin"] != None:
#self.subplot.set_xlim(left=self.settings["XAxisMin"])
self.subplot.set_xlim(xmin=self.settings["XAxisMin"])
if self.settings["XAxisMax"] != None:
#self.subplot.set_xlim(right=self.settings["XAxisMax"])
self.subplot.set_xlim(xmax=self.settings["XAxisMax"])
if self.settings["XAxisMax"] == None and self.settings["XAxisMin"] == None:
self.subplot.set_xlim(None,None)
self.subplot.set_autoscalex_on(True)
#self.subplot.autoscale(axis="x")
self.subplot.autoscale_view(scalex=True)
if self.settings["YAxisMin"] != None:
#self.subplot.set_ylim(bottom=self.settings["YAxisMin"])
self.subplot.set_ylim(ymin=self.settings["YAxisMin"])
if self.settings["YAxisMax"] != None:
#self.subplot.set_ylim(top=self.settings["YAxisMax"])
self.subplot.set_ylim(ymax=self.settings["YAxisMax"])
if self.settings["YAxisMax"] == None and self.settings["YAxisMin"] == None:
self.subplot.set_ylim(None,None)
self.subplot.set_autoscaley_on(True)
#self.subplot.autoscale(axis="y") #METHOD DOES NOT EXIST ON VERSION LESS THAN 1.0
self.subplot.autoscale_view(scaley=True)
#Draw
self.canvas.draw()
def UpdateSettings(self, grid=None, title=None, xlabel=None,
ylabel=None, axes=None, move=None, zoom=None,
xscale=None, yscale=None, legendposition=None):
"""
Updates the settings dict.
"""
if grid !=None:
self.settings["Grid"] = grid
if title !=None:
self.settings["Title"] = title
if xlabel !=None:
self.settings["XLabel"] = xlabel
if ylabel !=None:
self.settings["YLabel"] = ylabel
if axes != None:
self.settings["XAxisMin"]=axes[0]
self.settings["XAxisMax"]=axes[1]
self.settings["YAxisMin"]=axes[2]
self.settings["YAxisMax"]=axes[3]
if move != None:
self.settings["Move"] = move
if zoom != None:
self.settings["Zoom"] = zoom
if xscale != None:
self.settings["XScale"] = xscale
if yscale != None:
self.settings["YScale"] = yscale
if legendposition != None:
self.settings["LegendPosition"] = legendposition
def UpdateCursor(self):
if self.settings["Move"]:
cursor = wx.StockCursor(wx.CURSOR_HAND)
self.canvas.SetCursor(cursor)
if self.settings["Zoom"]:
cursor = wx.StockCursor(wx.CURSOR_CROSS)
self.canvas.SetCursor(cursor)
def GetSettings(self):
"""
        Returns the settings of the current plot.
"""
return self.settings
def startGUI(filename=None):
"""
Starts GUI.
If a filename is provided, that file is loaded into the GUI on startup.
"""
#Start GUI
app = wx.App(False)
gui = MainGUI(None, -1,filename)
app.MainLoop()
if __name__ == '__main__':
startGUI()
| lgpl-3.0 |
hlin117/statsmodels | statsmodels/tsa/statespace/tests/test_kalman.py | 19 | 22530 | """
Tests for _statespace module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
Hamilton, James D. 1994.
Time Series Analysis.
Princeton, N.J.: Princeton University Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError:
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return (prefix, dtype, None)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace import _statespace as ss
from .results import results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
prefix_statespace_map = {
's': ss.sStatespace, 'd': ss.dStatespace,
'c': ss.cStatespace, 'z': ss.zStatespace
}
prefix_kalman_filter_map = {
's': ss.sKalmanFilter, 'd': ss.dKalmanFilter,
'c': ss.cKalmanFilter, 'z': ss.zKalmanFilter
}
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Parameters
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Observed data
self.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order="F")
# Measurement equation
self.k_endog = k_endog = 1 # dimension of observed data
# design matrix
self.design = np.zeros((k_endog, 4, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [1, 1, 0, 0]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
self.k_states = k_states = 4 # dimension of state space
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
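        # The (row, column, time-slice) index arrays below fill the non-zero
        # entries of the transition matrix in a single fancy-indexing assignment.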
self.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype, order="F")
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters'], dtype=dtype
)
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
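        # i.e. propagate the diffuse prior one step ahead: P_1|0 = T P_0 T'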
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](self.true['start']), self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype, conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(np.r_[self.obs[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F")
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.loglikelihood_burn = self.true['start']
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
# Observed data
self.obs = np.array(data, ndmin=2, dtype=dtype, order="C").T
# Parameters
self.k_endog = k_endog = 2 # dimension of observed data
self.k_states = k_states = 6 # dimension of state space
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Measurement equation
# design matrix
self.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype)
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'], dtype=dtype
)
self.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.obs_cov[1, 1, 0] = sigma_ec**2
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization: modification
        # Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
# self.result['loglike'](self.true['start']),
self.result['loglike'](0),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.init_filter()
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
    Forecasting test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(dtype, conserve_memory)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(
np.c_[
self._obs,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.init_filter()
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08,
)
# self.loglikelihood_burn = self.true['start']
self.loglikelihood_burn = 0
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.result['state'][4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.result['state'][5][-1],
self.true_states.iloc[end-1, 3], 4
)
| bsd-3-clause |
aymeric-spiga/mcd-python | perso/inimeso/inimeso5.py | 1 | 4426 | #! /usr/bin/env python
### AS 10/05/2012. A python script to prepare initial state for idealized mesoscale runs.
### use : ensure mcd class is working. fill in input_coord. execute inimeso.
from string import split ; import numpy as np ; import matplotlib.pyplot as mpl
from mcd import mcd
rho_dust = 2500. # Mars dust density (kg.m-3)
grav = 3.72
ksi = 3. / 4. / rho_dust / grav
nueff = 0.5
### MCD INSTANCE and SETTINGS (actually, default. but one never knows)
query = mcd() ; query.zkey = 3 ; query.hrkey = 1
#query.toversion5(version="5.2")
### GET COORDINATES
lines = open("input_coord", "r").readlines()
query.lon = float(split(lines[0])[0]) ; query.lat = float(split(lines[1])[0])
query.xdate = float(split(lines[2])[0]) ; query.loct = float(split(lines[3])[0])
query.dust = float(split(lines[6])[0])
query.printcoord()
### OPEN FILES TO BE WRITTEN
sounding = open("input_sounding", "w") ; additional = open("input_therm", "w") ; more = open("input_more", "w")
dust = open("input_dust", "w") ; water = open("input_water", "w")
### GET and WRITE SURFACE VALUES
query.xz = 0.1 ; query.update() ; query.printmeanvar()
wvapor = query.extvar[42] #1.5*1e-3
wice = query.extvar[44] #0.0
sounding.write( "%10.2f%12.2f%12.2f\n" % (query.pres/100.,query.temp*(610./query.pres)**(1.0/3.9),(wvapor+wice)*1e3) )
more.write( "%10.2f%10.2f" % (query.extvar[1],query.extvar[14]) ) ; more.close()
### GET and WRITE VERTICAL PROFILE
#closesurf = [0.1,5,10,20,50,100]
closesurf = [0.1,0.5,1,2,5,10,20,50,100]
query.profile( tabperso = np.append(closesurf,np.linspace(200.,float(split(lines[4])[0])*1000.,float(split(lines[5])[0]))) )
for iz in range(len(query.prestab)):
wvapor = query.extvartab[iz,42] #1.5*1e-3
wice = query.extvartab[iz,44] #0.0
sounding.write( "%10.2f%12.2f%12.2f%12.2f%12.2f\n" % ( \
query.extvartab[iz,2],query.temptab[iz]*(610./query.prestab[iz])**(1.0/3.9),\
(wvapor+wice)*1e3,\
query.zonwindtab[iz],query.merwindtab[iz]) )
additional.write( "%12.2f%12.2f%18.6e%18.6e%12.2f\n" % ( \
query.extvartab[iz,53],query.extvartab[iz,8],\
query.prestab[iz],query.denstab[iz],query.temptab[iz]) )
water.write( "%18.6e%18.6e\n" % (wvapor*1e3,wice*1e3) )
### DUST PROFILES
q = query.extvartab[iz,38] # extvar(38)= Dust mass mixing ratio (kg/kg)
reff = query.extvartab[iz,39] # extvar(39)= Dust effective radius (m)
print q,reff
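    ### convert dust mass mixing ratio q (kg/kg) to a number mixing ratio N;
    ### ksi = 3/(4*rho_dust*g) is defined above, and the (1+nueff)**3 factor
    ### presumably assumes a lognormal-type size distribution with effective
    ### radius reff and effective variance nueff.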
N = (grav*ksi*((1+nueff)**3)/np.pi)*q/(reff**3)
dust.write( "%18.6e%18.6e\n" % (q,N) )
### FINISH
sounding.close() ; additional.close() ; dust.close() ; water.close()
query.plot1d(["p","t","u","v","h2ovap","h2oice"]) ; mpl.show()
##### ESTIMATE CCNs
#dustq,dustn = np.loadtxt("input_dust",unpack=True)
#############################################################################
#############################################################################
##fqdust = np.squeeze(query.extvartab[:,38])
#fwice = np.squeeze(query.extvartab[:,44])
#####
##### I: fwice, fqdust --> O: finter
#####
##import numpy as np
#from scipy.interpolate import InterpolatedUnivariateSpline
### remove profile points with a cloud
#x = np.arange(fwice.size)
#w = np.where(fwice < np.mean(fwice)/10.)
### spline-interpolate with remaining points
#yyq = dustq[w]
#xxq = x[w]
#splq = InterpolatedUnivariateSpline(xxq, yyq)
#yyn = dustn[w]
#xxn = x[w]
#spln = InterpolatedUnivariateSpline(xxn, yyn)
### get interpolated profile
### -- a good match for qdust + qccn
### -- treat negative values because of spline
#finterq = splq(x) - dustq
#finterq[np.where(finterq < 0.)] = -finterq[np.where(finterq < 0.)]
#fintern = spln(x) - dustn
#fintern[np.where(fintern < 0.)] = -fintern[np.where(fintern < 0.)]
### plot
#mpl.figure()
#mpl.plot(yyn,xxn,'bo')
#mpl.plot(dustn,x)
#mpl.plot(fintern,x)
#mpl.figure()
#mpl.plot(yyq,xxq,'bo')
#mpl.plot(dustq,x)
#mpl.plot(finterq,x)
#mpl.figure()
#mpl.plot(fwice,x,'b.')
#mpl.show()
#############################################################################
#############################################################################
#
#ccn = open("input_ccn", "w")
#for iz in range(len(query.prestab)):
# print iz,dustq[iz],finterq[iz]
# #ccn.write( "%18.6e%18.6e\n" % (finterq[iz],fintern[iz]) )
# ccn.write( "%18.6e%18.6e\n" % (np.max(finterq),np.max(fintern)) )
#ccn.close()
| gpl-2.0 |
wolfiex/DSMACC-testing | dsmacc/examples/cosine_similarity.py | 1 | 1825 | import tensorflow as tf
import numpy as np
def compute_cosine_distances(a, b):
# x shape is n_a * dim
# y shape is n_b * dim
# results shape is n_a * n_b
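    # L2-normalising the rows turns the matrix product into pairwise cosine
    # similarities; subtracting from 1 gives cosine distances.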
normalize_a = tf.nn.l2_normalize(a,1)
normalize_b = tf.nn.l2_normalize(b,1)
distance = 1 - tf.matmul(normalize_a, normalize_b, transpose_b=True)
return distance
input_matrix = np.array([[1, 1, 1],
[0, 2, 1],
[0, 0, 1],
[0, 0, 1]], dtype = 'float32')
a = np.array([[2, 0, 1, 1, 0, 2, 1, 1],[2, 0, 1, 1, 0, 2, 1, 1],[2, 0, 1, 1, 0, 2, 1, 1]], dtype = 'float32')
b = np.array([[2, 0, 1, 1, 0, 1, 1, 1],[9, 0, 1, 1, 0, 2, 1, 1],[2, 0, 1, 1, 0, 2, 1, 1]], dtype = 'float32')
a = b = input_matrix
a = np.array([[2, 0, 1, 1, 0, 2, 1, 1]], dtype = 'float32')
b = np.array([[2, 0, 1, 1, 0, 3, 1, 1]], dtype = 'float32')
with tf.Session() as sess:
e = sess.run(compute_cosine_distances(a,b))
print e
print ''
from sklearn.metrics.pairwise import cosine_distances
print(cosine_distances(a,b))
def compute_cosine_distances(a, b):
# x shape is n_a * dim
# y shape is n_b * dim
# results shape is n_a * n_b
normalize_a = tf.nn.l2_normalize(a,1)
normalize_b = tf.nn.l2_normalize(b,1)
distance = 1 - tf.matmul(normalize_a, normalize_b, transpose_b=True)
return distance
a = np.array([[2, 0, 1]], dtype = 'float32')
b = np.array([[1, 0, 8]], dtype = 'float32')
with tf.Session() as sess:
e = sess.run(compute_cosine_distances(tf.convert_to_tensor(a),tf.convert_to_tensor(b)))
print e
a = np.array([[.2, 0, .1]], dtype = 'float32')
b = np.array([[.1, 0, .1]], dtype = 'float32')
with tf.Session() as sess:
e = sess.run(compute_cosine_distances(tf.convert_to_tensor(a),tf.convert_to_tensor(b)))
print e
print 'fi'
| gpl-3.0 |
sixy6e/geospatial-hdf5 | examples/sieve_example.py | 1 | 2912 | #!/usr/bin/env python
import numpy
from scipy import ndimage
import pandas
from geoh5 import kea
from geoh5.kea import common as kc
# https://github.com/sixy6e/image-processing
from image_processing.segmentation import Segments
"""
Once completed, open the file in tuiview to see the colourised segments
and the raster attribute table.
"""
def main():
"""
Create a segmented array.
Compute basic stats for each segment:
(min, max, mean, standard deviation, total, area)
Write the segmented image and the raster attribute table.
The sieving filter will remove segements containing < 30 pixels.
"""
# data dimensions and datatype
dims = (1000, 1000)
dtype = 'int32'
# create some random data and segment via value > 5000
seg_data = numpy.random.randint(0, 10001, dims).astype(dtype)
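    # ndimage.label gives each connected region of pixels with value > 5000 a
    # unique integer id, writing the labels back into seg_data in place.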
nlabels = ndimage.label(seg_data > 5000, output=seg_data)
# create some random data to calculate stats against
data = numpy.random.ranf(dims)
# create a segments class object
seg = Segments(seg_data, include_zero=True)
# initial number of segments
print "Number of segments: {}".format(seg.n_segments)
# remove segments containing < 30 pixels
seg.sieve(30)
print "Number of segments: {}".format(seg.n_segments)
# basic stats (min, max, mean, standard deviation, total, area)
stats_table = seg.basic_statistics(data, dataframe=True)
stats_table.set_index("Segment_IDs", inplace=True)
# join via segment id
df = pandas.DataFrame({"Histogram": seg.histogram})
stats_table = df.join(stats_table, how='outer')
nrows = stats_table.shape[0]
# assign random colours to each segment
stats_table.insert(1, "Red", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(2, "Green", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(3, "Blue", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(4, "Alpha", 255)
# set the background data to (0, 0, 0); black
cols = ['Red', 'Green', 'Blue']
stats_table.loc[0, cols] = 0
# define 1 output band and add another band later
kwargs = {'width': dims[1],
'height': dims[0],
'count': 1,
'compression': 4,
'chunks': (100, 100),
'blocksize': 100,
'dtype': dtype}
with kea.open('sieve-example.kea', 'w', **kwargs) as src:
src.write(seg.array, 1)
# define the layer type as thematic (labelled, classified etc)
src.write_layer_type(1, kc.LayerType.thematic)
# write the stats table as an attribute table
usage = {"Red": "Red",
"Green": "Green",
"Blue": "Blue",
"Alpha": "Alpha",
"Histogram": "PixelCount"}
src.write_rat(stats_table, 1, usage=usage)
if __name__ == '__main__':
main()
| mit |
Adai0808/scikit-learn | sklearn/ensemble/weight_boosting.py | 97 | 40773 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
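            # Weighted average of per-estimator importances, using the boosting
            # weights as the averaging weights.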
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
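    # SAMME.R decision function (Zhu et al., algorithm 4, step 2 (c)):
    # h_k(x) = (K - 1) * (log p_k(x) - (1/K) * sum_j log p_j(x))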
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
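    Examples
    --------
    A minimal usage sketch; the iris loader here is illustrative only.
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import AdaBoostClassifier
    >>> iris = load_iris()
    >>> clf = AdaBoostClassifier(n_estimators=100)
    >>> clf.fit(iris.data, iris.target)                  # doctest: +SKIP
    >>> clf.score(iris.data, iris.target)                # doctest: +SKIP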
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
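# Worked illustration (comment only): with K = 3 classes and a sample whose
# true label is the second class, the coding row produced above is
# [-1/2, 1, -1/2] -- 1. where `classes == y` is True and -1/(K - 1) elsewhere.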
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
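# Note (added for clarity): with n_classes == 2 the log(n_classes - 1.) term
# vanishes and this reduces to the classic discrete AdaBoost weight
# learning_rate * log((1 - err) / err).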
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean of the predicted class probabilities of the
classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean of the predicted class probabilities of the
classifiers in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed
as the logarithm of the weighted mean predicted class probabilities of
the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
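Examples
--------
A minimal usage sketch (illustrative only; the toy data below is made up):
>>> import numpy as np
>>> from sklearn.ensemble import AdaBoostRegressor
>>> rng = np.random.RandomState(0)
>>> X = rng.rand(100, 4)
>>> y = X[:, 0] + 0.1 * rng.randn(100)
>>> reg = AdaBoostRegressor(random_state=0).fit(X, y)
>>> reg.predict(X[:2])  # doctest: +SKIP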
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# Ensure the bootstrap indices are an ndarray (no copy if already one)
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
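# Note (added for clarity): at this point estimator_error < 0.5, so beta < 1.
# Well-predicted samples (error_vect near 0) are multiplied by roughly
# beta ** learning_rate < 1, while poorly predicted ones (error_vect near 1)
# keep almost their full weight, shifting emphasis toward hard samples.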
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
SPIhub/hummingbird | scripts/masking/mask.py | 2 | 3250 | #!/usr/bin/env python
import sys,argparse
import numpy
import os
import time, datetime
import h5py
import scipy.misc
import configobj
def get_valid_stacks(f_names):
f_names_valid = []
for fn in f_names:
with h5py.File(fn,"r") as f:
if "mean" in f.keys():
f_names_valid.append(fn)
return f_names_valid
def get_dims(f_name):
with h5py.File(f_name,"r") as f:
s = list(numpy.shape(f["mean"]))
s.pop(0)  # drop the stack dimension; keep only the per-frame shape
return tuple(s)
def get_max_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) < threshold)
def get_min_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) > threshold)
def get_badpixelmask(f_name):
if f_name[-3:] == ".h5":
with h5py.File(f_name, "r") as f:
m = numpy.array(f["/data/data"])
elif f_name[-4:] == ".png":
m = scipy.misc.imread(f_name,flatten=True) / 255.
return m
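# Example configuration file (illustrative only; the key names match the ones
# read from `C` in the main block below, the values are made up):
#
# mean_max = 10000.
# mean_min = None
# std_max = None
# std_min = None
# median_max = None
# median_min = None
# badpixelmask = None
# outdir = ./masks
# output_png = True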
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hummingbird mask tool. Creates mask from stack files in current directory and given configuration file.')
parser.add_argument('config', type=str,
help="Configuration file")
parser.add_argument('-l', '--link', type=str, help="Creates symbolic link to the H5 mask from given path")
if(len(sys.argv) == 1):
parser.print_help()
args = parser.parse_args()
C = configobj.ConfigObj(args.config)
files = os.listdir(".")
files = [f for f in files if len(f) > 3]
files = [f for f in files if f[-3:] == ".h5"]
files = get_valid_stacks(files)
if len(files) == 0:
sys.exit(0)
s = get_dims(files[0])
mask = numpy.ones(shape=s, dtype="bool")
if C["mean_max"].lower() != 'none':
mask *= get_max_mask(files, "mean", float(C["mean_max"]))
if C["std_max"].lower() != 'none':
mask *= get_max_mask(files, "std", float(C["std_max"]))
if C["median_max"].lower() != 'none':
mask *= get_max_mask(files, "median", float(C["median_max"]))
if C["mean_min"].lower() != 'none':
mask *= get_min_mask(files, "mean", float(C["mean_min"]))
if C["std_min"].lower() != 'none':
mask *= get_min_mask(files, "std", float(C["std_min"]))
if C["median_min"].lower() != 'none':
mask *= get_min_mask(files, "median", float(C["median_min"]))
if C["badpixelmask"].lower() != 'none':
mask *= get_badpixelmask(C["badpixelmask"])
fn_root = files[-1].split("/")[-1][:-3]
outdir = C["outdir"]
os.system("mkdir -p %s" % outdir)
if C["output_png"].lower() in ("true", "1", "yes"):  # bool() of any non-empty string is True
import matplotlib.pyplot as pypl
pypl.imsave("%s/mask_%s.png" % (outdir,fn_root), mask, cmap="binary_r", vmin=0, vmax=1)
with h5py.File("%s/mask_%s.h5" % (outdir,fn_root), "w") as f:
f["data/data"] = mask
os.system("cp %s %s/mask_%s.conf" % (args.config,outdir,fn_root))
if args.link:
os.system("ln -s -f %s/mask_%s.h5 %s" % (outdir, fn_root, args.link))
| bsd-2-clause |
q1ang/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
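# Minimal usage sketch (added for clarity; independent of the figures below):
# fit a Gaussian KDE on a tiny 1D sample and evaluate the log-density at two
# query points.
_demo_kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(
    np.array([[-1.0], [0.0], [0.2], [1.5]]))
_demo_log_density = _demo_kde.score_samples(np.array([[0.0], [1.0]]))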
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
harpolea/pyro2 | analysis/plotvar.py | 1 | 2438 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import argparse
from util import io
import mesh.patch as patch
# plot a single variable from an output file
#
# Usage: ./plotvar.py filename variable
def makeplot(plotfile, variable, outfile,
width=6.5, height=5.25,
log=False, compact=False, quiet=False):
sim = io.read(plotfile)
if isinstance(sim, patch.CellCenterData2d):
myd = sim
else:
myd = sim.cc_data
myg = myd.grid
plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')
var = myd.get_var(variable)
if log:
var = np.log10(var)
plt.imshow(np.transpose(var.v()),
interpolation="nearest", origin="lower",
extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])
if not compact:
plt.colorbar()
plt.xlabel("x")
plt.ylabel("y")
if compact:
plt.axis("off")
plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
plt.savefig(outfile)
else:
plt.savefig(outfile, bbox_inches="tight")
if not quiet:
plt.show()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-o", type=str, default="plot.png",
metavar="plot.png", help="output file name")
parser.add_argument("--log", action="store_true",
help="plot log of variable")
parser.add_argument("--compact", action="store_true",
help="remove axes and border")
parser.add_argument("--quiet", action="store_true",
help="don't show the figure")
parser.add_argument("-W", type=float, default=6.5,
metavar="width", help="plot width (inches)")
parser.add_argument("-H", type=float, default=5.25,
metavar="height", help="plot height (inches)")
parser.add_argument("plotfile", type=str, nargs=1,
help="the plotfile you wish to plot")
parser.add_argument("variable", type=str, nargs=1,
help="the name of the solver used to run the simulation")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
makeplot(args.plotfile[0], args.variable[0], args.o,
width=args.W, height=args.H,
log=args.log, compact=args.compact, quiet=args.quiet)
| bsd-3-clause |
CI-WATER/TethysCluster | utils/scimage_11_10.py | 2 | 15717 | #!/usr/bin/env python
"""
This script is meant to be run inside of a ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/oneiric/current
$ wget $EC2_UBUNTU_IMG_URL/oneiric-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/oneiric-server-cloudimg-i386.tar.gz
After downloading a Ubuntu cloud image the next step is to extract the image::
$ tar xvzf oneiric-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f oneiric-server-cloudimg-amd64.img
$ resize2fs oneiric-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount oneiric-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -o bind /dev /tmp/img-mount/dev
Copy /etc/resolv.conf and /etc/mtab to the image::
$ cp /etc/resolv.conf /tmp/img-mount/etc/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils "
BUILD_UTILS_PKGS += "python-setuptools python-pip python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
TethysCluster Ubuntu 11.10 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://web.mit.edu/tethyscluster
Documentation: http://web.mit.edu/tethyscluster/docs/latest
Code: https://github.com/jtriley/TethysCluster
Mailing list: [email protected]
This AMI Contains:
* Custom-Compiled Atlas, Numpy, Scipy, etc
* Open Grid Scheduler (OGS) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* IPython 0.12 with parallel support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./jobscript.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
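# Example usage (illustrative only):
# status, out = run_command('uname -r', get_output=True)
# kernel_version = out.strip() if status == 0 else None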
def apt_command(cmd):
dpkg_opts = "Dpkg::Options::='--force-confnew'"
cmd = "apt-get -o %s -y --force-yes %s" % (dpkg_opts, cmd)
cmd = "DEBIAN_FRONTEND='noninteractive' " + cmd
run_command(cmd)
def apt_install(pkgs):
apt_command('install %s' % pkgs)
def chdir(directory):
opts = glob.glob(directory)
isdirlist = [o for o in opts if os.path.isdir(o)]
if len(isdirlist) > 1:
raise Exception("more than one dir matches: %s" % directory)
os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
def configure_apt_sources():
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
srcfile = open(APT_SOURCES_FILE, 'w')
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
srcfile.write('deb %s\n' % CONDOR_APT)
srcfile.close()
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
def upgrade_packages():
apt_command('update')
apt_command('upgrade')
def install_build_utils():
"""docstring for configure_build"""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
chdir(SRC_DIR)
apt_command('build-dep gridengine')
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
apt_install('git')
run_command('git clone %s' % GRID_SCHEDULER_GIT)
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
chdir(SRC_DIR)
run_command("rm /var/lock")
apt_install('condor')
run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
run_command('mkdir /var/lib/condor/log')
run_command('mkdir /var/lib/condor/run')
run_command('chown -R condor:condor /var/lib/condor/log')
run_command('chown -R condor:condor /var/lib/condor/run')
def install_torque():
chdir(SRC_DIR)
apt_install('torque-server torque-mom torque-client')
def install_pydrmaa():
chdir(SRC_DIR)
run_command('pip install drmaa')
def install_atlas():
"""docstring for install_atlas"""
chdir(SRC_DIR)
apt_command('build-dep atlas')
if glob.glob("*atlas*.deb"):
run_command('dpkg -i *atlas*.deb')
return
apt_command('source atlas')
chdir('atlas-*')
run_command('fakeroot debian/rules custom')
run_command('dpkg -i ../*atlas*.deb')
def install_numpy():
"""docstring for install_numpy"""
chdir(SRC_DIR)
apt_command('build-dep python-numpy')
if glob.glob('*numpy*.deb'):
run_command('dpkg -i *numpy*.deb')
return
apt_command('source python-numpy')
chdir('python-numpy*')
sitecfg = open('site.cfg', 'w')
sitecfg.write(NUMPY_SCIPY_SITE_CFG)
sitecfg.close()
_fix_atlas_rules()
def _deb_failure_callback(retval):
if not glob.glob('../*numpy*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*numpy*.deb')
def install_scipy():
"""docstring for install_scipy"""
chdir(SRC_DIR)
apt_command('build-dep python-scipy')
if glob.glob('*scipy*.deb'):
run_command('dpkg -i *scipy*.deb')
return
apt_command('source python-scipy')
chdir('python-scipy*')
sitecfg = open('site.cfg', 'w')
sitecfg.write(NUMPY_SCIPY_SITE_CFG)
sitecfg.close()
_fix_atlas_rules()
def _deb_failure_callback(retval):
if not glob.glob('../*numpy*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*scipy*.deb')
def install_openmpi():
chdir(SRC_DIR)
apt_command('build-dep libopenmpi-dev')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
return
apt_command('source libopenmpi-dev')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build openmpi with Grid Engine support")
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install pyzmq==2.1.9')
run_command('pip install ipython tornado pygments')
mjax_install = 'from IPython.external.mathjax import install_mathjax'
mjax_install += '; install_mathjax()'
run_command("python -c '%s'" % mjax_install)
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d/00-tethyscluster', 'w')
motd.write(STARCLUSTER_MOTD)
motd.close()
os.chmod(motd.name, 0755)
def configure_cloud_init():
"""docstring for configure_cloud_init"""
cloudcfg = open('/etc/cloud/cloud.cfg', 'w')
cloudcfg.write(CLOUD_INIT_CFG)
cloudcfg.close()
def configure_bash():
completion_line_found = False
for line in fileinput.input('/etc/bash.bashrc', inplace=1):
if 'bash_completion' in line and line.startswith('#'):
print line.replace('#', ''),
completion_line_found = True
elif completion_line_found:
print line.replace('#', ''),
completion_line_found = False
else:
print line,
aliasfile = open('/root/.bash_aliases', 'w')
aliasfile.write("alias ..='cd ..'\n")
aliasfile.close()
def setup_environ():
num_cpus = multiprocessing.cpu_count()
os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
os.environ['DEBIAN_FRONTEND'] = "noninteractive"
def install_nfs():
chdir(SRC_DIR)
run_command('initctl reload-configuration')
apt_install('nfs-kernel-server')
run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
# stop mysql for interactively asking for password
preseedf = '/tmp/mysql-preseed.txt'
mysqlpreseed = open(preseedf, 'w')
preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = "python-dev git vim mercurial subversion cvs encfs "
pkgs += "openmpi-bin libopenmpi-dev python-django "
pkgs += "keychain screen tmux zsh ksh csh tcsh python-mpi4py "
pkgs += "python-virtualenv python-imaging python-boto python-matplotlib "
pkgs += "unzip rar unace build-essential gfortran ec2-api-tools "
pkgs += "ec2-ami-tools mysql-server mysql-client apache2 "
pkgs += "libapache2-mod-wsgi sysv-rc-conf pssh emacs cython irssi "
pkgs += "python-distutils-extra htop vim-scripts python-ctypes python-pudb"
apt_install(pkgs)
def configure_init():
for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm /etc/resolv.conf')
run_command('rm /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
for path in glob.glob('/usr/local/src/*'):
if os.path.isdir(path):
shutil.rmtree(path)
run_command('rm -f /var/cache/apt/archives/*.deb')
run_command('rm -f /var/cache/apt/archives/partial/*')
for f in glob.glob('/etc/profile.d'):
if 'byobu' in f:
run_command('rm %s' % f)
def main():
"""docstring for main"""
if os.getuid() != 0:
sys.stderr.write('you must be root to run this script\n')
return
setup_environ()
configure_motd()
configure_cloud_init()
configure_bash()
configure_apt_sources()
upgrade_packages()
install_build_utils()
install_gridscheduler()
install_condor()
#install_torque()
install_pydrmaa()
install_atlas()
install_numpy()
install_scipy()
install_ipython()
install_openmpi()
install_hadoop()
install_nfs()
install_default_packages()
configure_init()
cleanup()
if __name__ == '__main__':
main()
| gpl-3.0 |
cgnorthcutt/rankpruning | tutorial_and_testing/CIFAR_MNIST_testing.py | 1 | 9829 |
# coding: utf-8
# In[ ]:
from __future__ import print_function
import numpy as np
from datetime import datetime as dt
from sklearn.linear_model import LogisticRegression
import pickle
import sys
import os
import errno
from rankpruning import RankPruning, other_pnlearning_methods
from util import get_dataset, downsample, get_metrics, make_sure_path_exists
# In[ ]:
def get_model(key = None, rh1 = None, rh0 = None, clf = None):
models = {
"Rank Pruning" : RankPruning(clf = clf),
"Baseline" : other_pnlearning_methods.BaselineNoisyPN(clf = clf),
"True Classifier": clf,
"Rank Pruning (noise rates given)": RankPruning(rh1, rh0, clf = clf),
"Elk08 (noise rates given)": other_pnlearning_methods.Elk08(e1 = 1 - rh1, clf = clf),
"Liu16 (noise rates given)": other_pnlearning_methods.Liu16(rh1, rh0, clf = clf),
"Nat13 (noise rates given)": other_pnlearning_methods.Nat13(rh1, rh0, clf = clf),
}
try:
model = models[key]
except KeyError:
model = None
return model
# In[ ]:
def run_test(
dataset,
clf_type,
epochs,
true_rh1,
downsample_ratio,
ordered_models_keys,
list_of_images = range(10),
suppress_error = False,
verbose = False,
pi1 = 0.0,
one_vs_rest = True,
cv_n_folds = 3,
early_stopping = True,
pulearning = None,
):
# Cast types to ensure consistency for 1 and 1.0, 0 and 0.0
true_rh1 = float(true_rh1)
downsample_ratio = float(downsample_ratio)
pi1 = float(pi1)
# Load MNIST or CIFAR data
(X_train_original, y_train_original), (X_test_original, y_test_original) = get_dataset(dataset = dataset)
X_train_original, y_train_original = downsample(X_train_original, y_train_original, downsample_ratio)
# Initialize models and result storage
metrics = {key:[] for key in ordered_models_keys}
data_all = {"metrics": metrics, "calculated": {}, "errors": {}}
start_time = dt.now()
# Run through the ten images class of 0, 1, ..., 9
for image in list_of_images:
if one_vs_rest:
# X_train and X_test will not be modified. All data will be used. Adjust pointers.
X_train = X_train_original
X_test = X_test_original
# Relabel the image data. Make label 1 only for given image.
y_train = np.array(y_train_original == image, dtype=int)
y_test = np.array(y_test_original == image, dtype=int)
else: # one_vs_other
# Reducing the dataset to just contain our image and image = 4
other_image = 4 if image != 4 else 7
X_train = X_train_original[(y_train_original == image) | (y_train_original == other_image)]
y_train = y_train_original[(y_train_original == image) | (y_train_original == other_image)]
X_test = X_test_original[(y_test_original == image) | (y_test_original == other_image)]
y_test = y_test_original[(y_test_original == image) | (y_test_original == other_image)]
# Relabel the data. Make label 1 only for given image.
y_train = np.array(y_train == image, dtype=int)
y_test = np.array(y_test == image, dtype=int)
print()
print("Evaluating image:", image)
print("Number of positives in y:", sum(y_train))
print()
sys.stdout.flush()
s = y_train * (np.cumsum(y_train) < (1 - true_rh1) * sum(y_train))
# In the presence of mislabeled negative (negative incorrectly labeled positive):
# pi1 is the fraction of mislabeled negative in the labeled set:
num_mislabeled = int(sum(y_train) * (1 - true_rh1) * pi1 / (1 - pi1))
if num_mislabeled > 0:
negative_set = s[y_train==0]
mislabeled = np.random.choice(len(negative_set), num_mislabeled, replace = False)
negative_set[mislabeled] = 1
s[y_train==0] = negative_set
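# Worked example (comment only, made-up numbers): with 1000 true positives,
# true_rh1 = 0.5 and pi1 = 0.25, the cumsum mask above keeps s = 1 for roughly
# the first half (~500) of the positives, and num_mislabeled =
# int(500 * 0.25 / 0.75) = 166 negatives are flipped to s = 1, so mislabeled
# examples make up ~166 / (500 + 166) ~ 0.25 of the noisy positive set.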
print("image = {0}".format(image))
print("Training set: total = {0}, positives = {1}, negatives = {2}, P_noisy = {3}, N_noisy = {4}"
.format(len(X_train), sum(y_train), len(y_train)-sum(y_train), sum(s), len(s)-sum(s)))
print("Testing set: total = {0}, positives = {1}, negatives = {2}"
.format(len(X_test), sum(y_test), len(y_test) - sum(y_test)))
# Fit different models for PU learning
for key in ordered_models_keys:
fit_start_time = dt.now()
print("\n\nFitting {0} classifier. Default classifier is {1}.".format(key, clf_type))
if clf_type == "logreg":
clf = LogisticRegression()
elif clf_type == "cnn":
from classifier_cnn import CNN
from keras import backend as K
K.clear_session()
clf = CNN(
dataset_name = dataset,
num_category = 2,
epochs = epochs,
early_stopping = early_stopping,
verbose = 1,
)
else:
raise ValueError("clf_type must be either logreg or cnn for this testing file.")
ps1 = sum(s) / float(len(s))
py1 = sum(y_train) / float(len(y_train))
true_rh0 = pi1 * ps1 / float(1 - py1)
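# Note (added for clarity): rh0 = P(s=1 | y=0). Since a fraction pi1 of the
# noisy positive set is truly negative, P(s=1, y=0) = pi1 * ps1, and dividing
# by P(y=0) = 1 - py1 gives the expression for true_rh0 used above.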
model = get_model(
key = key,
rh1 = true_rh1,
rh0 = true_rh0,
clf = clf,
)
try:
if key == "True Classifier":
model.fit(X_train, y_train)
elif key in ["Rank Pruning", "Rank Pruning (noise rates given)", "Liu16 (noise rates given)"]:
model.fit(X_train, s, pulearning = pulearning, cv_n_folds = cv_n_folds)
elif key in ["Nat13 (noise rates given)"]:
model.fit(X_train, s, pulearning = pulearning)
else: # Elk08, Baseline
model.fit(X_train, s)
pred = model.predict(X_test)
# Produces only P(y=1|x) for pulearning models because they are binary
pred_prob = model.predict_proba(X_test)
pred_prob = pred_prob[:,1] if key == "True Classifier" else pred_prob
# Compute metrics
metrics_dict = get_metrics(pred, pred_prob, y_test)
elapsed = (dt.now() - fit_start_time).total_seconds()
if verbose:
print("\n{0} Model Performance at image {1}:\n=================\n".format(key, image))
print("Time Required", elapsed)
print("AUC:", metrics_dict["AUC"])
print("Error:", metrics_dict["Error"])
print("Precision:", metrics_dict["Precision"])
print("Recall:", metrics_dict["Recall"])
print("F1 score:", metrics_dict["F1 score"])
print("rh1:", model.rh1 if hasattr(model, 'rh1') else None)
print("rh0:", model.rh0 if hasattr(model, 'rh0') else None)
print()
metrics_dict["image"] = image
metrics_dict["time_seconds"] = elapsed
metrics_dict["rh1"] = model.rh1 if hasattr(model, 'rh1') else None
metrics_dict["rh0"] = model.rh0 if hasattr(model, 'rh0') else None
# Append dictionary of error and loss metrics
if key not in data_all["metrics"]:
data_all["metrics"][key] = [metrics_dict]
else:
data_all["metrics"][key].append(metrics_dict)
data_all["calculated"][(key, image)] = True
except Exception as e:
msg = "Error in {0}, image {1}, rh1 {2}, m {3}: {4}\n".format(key, image, true_rh1, pi1, e)
print(msg)
make_sure_path_exists("failed_models/")
with open("failed_models/" + key + ".txt", "ab") as f:
f.write(msg)
if suppress_error:
continue
else:
raise
return data_all
# In[ ]:
try:
image_index = int(sys.argv[1])
except:
image_index = None
try:
model_index = int(sys.argv[2])
except:
model_index = None
image_list = range(10)
ordered_models_keys = [
"Rank Pruning",
"Rank Pruning (noise rates given)",
"Elk08 (noise rates given)",
"Nat13 (noise rates given)",
"Liu16 (noise rates given)",
"Baseline",
"True Classifier",
]
if image_index is not None:
# Select only the single element
# otherwise all images are tested.
image_list = [image_list[image_index]]
if model_index is not None:
# Select only the single model
# otherwise all models are tested.
ordered_models_keys = [ordered_models_keys[model_index]]
for image in image_list:
for pi1, true_rh1 in [(0.5, 0.5), (0.25, 0.25), (0.5, 0.0), (0.0, 0.5)]:
for model in ordered_models_keys:
# Parameter settings:
dataset = "mnist" # choose between mnist and cifar
downsample_ratio = 0.5 # What fraction of data to keep for speed increase
# clf specific settings:
clf_type = "logreg" # "logreg" or "cnn"
epochs = 50
cv_n_folds = 3
early_stopping = True
# Other settings (currently need not change):
suppress_error = False
verbose = True
one_vs_rest = True # Default is True, False -> test one vs other
pulearning = (pi1 == 0)
print("[***]", "true_rh1 =", true_rh1)
print("[***]", "image =", image)
print("[***]", "pi1 =", pi1)
print("[***]", "downsample_ratio =", downsample_ratio)
print("[***] {0} TEST: One vs.".format(dataset), "Rest" if one_vs_rest else "Other")
data_all = run_test(
dataset = dataset,
clf_type = clf_type,
epochs = epochs,
true_rh1 = true_rh1,
downsample_ratio = downsample_ratio,
ordered_models_keys = [model],
list_of_images = [image],
suppress_error = suppress_error,
verbose = verbose,
pi1 = pi1,
one_vs_rest = one_vs_rest,
cv_n_folds = cv_n_folds,
early_stopping = early_stopping,
pulearning = pulearning,
)
print("Completed: model", model, "and image", image)
# Before we store results, create folder if needed.
make_sure_path_exists("data/")
pickle.dump(data_all, open("data/metrics_{0}_{1}_{2}_epochs_rh1_{3}_downsample_{4}_model_{5}_image_{6}_pi1_{7}.p".format(dataset, clf_type, epochs, true_rh1, downsample_ratio, model, image, pi1),"wb"))
| mit |
GoogleCloudPlatform/datacatalog-connectors-rdbms | google-datacatalog-rdbms-connector/tests/google/datacatalog_connectors/rdbms/scrape/base_metadata_enricher_test.py | 1 | 3636 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pandas as pd
from google.datacatalog_connectors.rdbms.scrape import base_metadata_enricher
from google.datacatalog_connectors.commons_test import utils
class BaseMetadataEnricherTestCase(unittest.TestCase):
__MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
def test_enrich_schemas_metadata_with_csv_should_return_objects(self):
enrich_metadata_dict = {'entry_prefix': 'mycompany'}
metadata_definition = \
utils.Utils.convert_json_to_object(
self.__MODULE_PATH,
'metadata_definition.json')
scraped_dataframe = pd.read_csv(
utils.Utils.get_resolved_file_name(self.__MODULE_PATH,
'sqlserver_full_dump.csv'))
enriched_dataframe = base_metadata_enricher.BaseMetadataEnricher(
metadata_definition,
enrich_metadata_dict).enrich(scraped_dataframe)
self.assertEqual(7, len(enriched_dataframe))
self.assertTrue(
enriched_dataframe['schema_name'][0].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['table_name'][0].startswith('mycompany'))
def test_enrich_with_pattern_schemas_metadata_with_csv_should_return_objects( # noqa: E501
self):
enrich_metadata_dict = {
'entry_prefix': 'mycompany',
'entry_id_pattern_for_prefix': '^[^a-zA-Z_]+.*$'
}
metadata_definition = \
utils.Utils.convert_json_to_object(
self.__MODULE_PATH,
'metadata_definition.json')
scraped_dataframe = pd.read_csv(
utils.Utils.get_resolved_file_name(
self.__MODULE_PATH,
'sqlserver_full_dump_invalid_asset_names.csv'))
enriched_dataframe = base_metadata_enricher.BaseMetadataEnricher(
metadata_definition,
enrich_metadata_dict).enrich(scraped_dataframe)
self.assertEqual(10, len(enriched_dataframe))
self.assertFalse(
enriched_dataframe['schema_name'][6].startswith('mycompany'))
self.assertFalse(
enriched_dataframe['table_name'][6].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['schema_name'][7].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['table_name'][7].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['column_name'][7].startswith('mycompany'))
self.assertFalse(
enriched_dataframe['schema_name'][8].startswith('mycompany'))
self.assertFalse(
enriched_dataframe['table_name'][8].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['schema_name'][9].startswith('mycompany'))
self.assertTrue(
enriched_dataframe['table_name'][9].startswith('mycompany'))
self.assertFalse(
enriched_dataframe['column_name'][9].startswith('mycompany'))
| apache-2.0 |
hrjn/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 69 | 6473 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
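# A quick illustrative sketch (not part of the original benchmark): both
# estimators are fit the same way on a small clean Gaussian sample, and the
# MCD additionally records the subset of observations it kept as inliers in
# its `support_` attribute.
_X_sketch = np.random.RandomState(42).randn(50, 3)
_mcd_sketch = MinCovDet(random_state=42).fit(_X_sketch)
_mle_sketch = EmpiricalCovariance().fit(_X_sketch)
assert _mcd_sketch.covariance_.shape == _mle_sketch.covariance_.shape == (3, 3)
assert _mcd_sketch.support_.shape == (50,)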
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(np.int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
RAJSD2610/SDNopenflowSwitchAnalysis | ece671/Instantplot.py | 1 | 1749 | import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn  # imported only for its nicer default plot style
# Directory holding the per-second flow dumps produced by the switch analysis.
path = os.path.expanduser("~/Desktop/ece671/SwitchAnalysis/PlotDump")
num_files = len([f for f in os.listdir(path)
                 if os.path.isfile(os.path.join(path, f))])
print(num_files)
def file_len(fname):
    """Return the number of lines in fname (0 for an empty file)."""
    count = 0
    with open(fname) as fh:
        for count, _ in enumerate(fh, start=1):
            pass
    return count
# Total flows present after the first flow: one ftotal.<n>.csv file per second.
t = []
i = 0
while i < (num_files / 2):
    j = i + 1
    fname = os.path.join(path, "ftotal.{0}.csv".format(j))
    y = file_len(fname)
    if y < 0:
        y = 0
    t.append(y)
    i += 1
print(t)
# Flows programmed per second: one fpersec.<n>.csv file per second.
# A file with only a single (header) row counts as zero programmed flows.
f = []
i = 0
while i < (num_files / 2):
    j = i + 1
    fname = os.path.join(path, "fpersec.{0}.csv".format(j))
    y = file_len(fname)
    if y <= 1:
        y = 0
    f.append(y)
    i += 1
print(f)
plt.plot(list(range(1, len(t) + 1)), t, '.-', label="traffic type and timeout")
plt.title("Total Flows Present after 1st flow")
plt.xlabel("time(s)")
plt.ylabel("flows")
plt.legend()
plt.show()
plt.plot(list(range(1, len(f) + 1)), f, '.-', label="traffic type and timeout")
plt.title("Flows programmed per sec")
plt.xlabel("time(s)")
plt.ylabel("flows")
plt.legend()
plt.show()
| gpl-3.0 |
kashif/scikit-learn | sklearn/datasets/samples_generator.py | 3 | 56503 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
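    Examples
    --------
    A minimal usage sketch; only the output shapes are checked here, since
    the generated values depend on the random state.
    >>> from sklearn.datasets.samples_generator import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=20,
    ...                            n_informative=5, random_state=0)
    >>> X.shape, y.shape
    ((100, 20), (100,))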
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
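    Examples
    --------
    A small shape-only sketch (the values depend on the random state).
    >>> from sklearn.datasets.samples_generator import make_multilabel_classification
    >>> X, Y = make_multilabel_classification(n_samples=10, n_classes=4,
    ...                                       random_state=0)
    >>> X.shape, Y.shape
    ((10, 20), (10, 4))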
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
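    Examples
    --------
    Shape-only sketch; the labels are +1/-1 as described above.
    >>> from sklearn.datasets.samples_generator import make_hastie_10_2
    >>> X, y = make_hastie_10_2(n_samples=100, random_state=0)
    >>> X.shape, y.shape
    ((100, 10), (100,))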
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
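    Examples
    --------
    A minimal sketch checking only the output shapes.
    >>> from sklearn.datasets.samples_generator import make_regression
    >>> X, y, w = make_regression(n_samples=50, n_features=4, coef=True,
    ...                           random_state=0)
    >>> X.shape, y.shape, w.shape
    ((50, 4), (50,), (4,))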
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
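    Examples
    --------
    Shape-only sketch; half the points lie on each circle.
    >>> from sklearn.datasets.samples_generator import make_circles
    >>> X, y = make_circles(n_samples=100, factor=0.5, random_state=0)
    >>> X.shape, y.shape
    ((100, 2), (100,))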
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
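    Examples
    --------
    Shape-only sketch; half the points lie on each half circle.
    >>> from sklearn.datasets.samples_generator import make_moons
    >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
    >>> X.shape, y.shape
    ((100, 2), (100,))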
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
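    Examples
    --------
    Shape-only sketch; only the first five features influence `y`.
    >>> from sklearn.datasets.samples_generator import make_friedman1
    >>> X, y = make_friedman1(n_samples=100, n_features=10, random_state=0)
    >>> X.shape, y.shape
    ((100, 10), (100,))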
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
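    Examples
    --------
    Shape-only sketch of a 50 x 25 matrix with an effective rank of 5.
    >>> from sklearn.datasets.samples_generator import make_low_rank_matrix
    >>> X = make_low_rank_matrix(n_samples=50, n_features=25,
    ...                          effective_rank=5, random_state=0)
    >>> X.shape
    (50, 25)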
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
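    Examples
    --------
    Shape-only sketch; Y is (n_features, n_samples), D is
    (n_features, n_components) and X is (n_components, n_samples).
    >>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
    >>> Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=8,
    ...                                    n_features=5, n_nonzero_coefs=3,
    ...                                    random_state=0)
    >>> Y.shape, D.shape, X.shape
    ((5, 10), (5, 8), (8, 10))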
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
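    Examples
    --------
    Shape-only sketch; only the first four features are informative.
    >>> from sklearn.datasets.samples_generator import make_sparse_uncorrelated
    >>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
    ...                                 random_state=0)
    >>> X.shape, y.shape
    ((100, 10), (100,))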
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
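    Examples
    --------
    Shape-only sketch of a 3 x 3 symmetric positive-definite matrix.
    >>> from sklearn.datasets.samples_generator import make_spd_matrix
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> X.shape
    (3, 3)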
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
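    Examples
    --------
    Shape-only sketch; the returned precision matrix is dim x dim.
    >>> from sklearn.datasets.samples_generator import make_sparse_spd_matrix
    >>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
    >>> prec.shape
    (4, 4)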
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
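    Examples
    --------
    Shape-only sketch; `t` is the position along the roll.
    >>> from sklearn.datasets.samples_generator import make_swiss_roll
    >>> X, t = make_swiss_roll(n_samples=100, random_state=0)
    >>> X.shape, t.shape
    ((100, 3), (100,))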
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
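    Examples
    --------
    Shape-only sketch with three quantile-based classes in two dimensions.
    >>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
    >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
    ...                                n_classes=3, random_state=0)
    >>> X.shape, y.shape
    ((90, 2), (90,))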
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
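    Examples
    --------
    Shape-only sketch; `rows` and `cols` are boolean membership indicators.
    >>> from sklearn.datasets.samples_generator import make_biclusters
    >>> X, rows, cols = make_biclusters(shape=(30, 30), n_clusters=3,
    ...                                 random_state=0)
    >>> X.shape, rows.shape, cols.shape
    ((30, 30), (3, 30), (3, 30))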
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
Ademan/NumPy-GSoC | numpy/lib/recfunctions.py | 58 | 34495 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
    Fills fields of output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
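# A small illustration of zip_descr (sketch; the '<i4' integer letters below
# follow the platform convention used in the docstrings of this module):
#   >>> a = np.zeros(3, dtype=[('a', int)])
#   >>> b = np.zeros(3, dtype=[('ba', float), ('bb', int)])
#   >>> zip_descr((a, b), flatten=True)
#   [('a', '<i4'), ('ba', '<f8'), ('bb', '<i4')]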
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False}, optional
Whether to collapse nested fields into a flat sequence of items.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data=None, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
err_msg = "The number of arrays does not match the number of names"
raise ValueError(err_msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
elif not hasattr(dtypes, '__iter__'):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, "\
"a single dtype or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
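# A small illustration of append_fields (sketch): appending a single new
# column to an existing structured array.
#   >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
#   >>> b = append_fields(a, 'C', data=[100, 200], usemask=False)
#   >>> b.dtype.names
#   ('A', 'B', 'C')
#   >>> b['C']
#   array([100, 200])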
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the arrays.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of r1
not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = names.index(name)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names:
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names:
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
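# A small illustration of join_by (sketch): an inner join keeps only the
# records whose key value appears in both inputs.
#   >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('v1', float)])
#   >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('v2', float)])
#   >>> joined = join_by('key', r1, r2, jointype='inner', usemask=False)
#   >>> joined.dtype.names
#   ('key', 'v1', 'v2')
#   >>> joined['key']
#   array([2])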
| bsd-3-clause |
chkvch/on_GP | reos_water.py | 1 | 2818 | import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
class eos:
def __init__(self, path_to_data=None):
if not path_to_data:
import os
path_to_data = os.environ['ongp_data_path']
path = '%s/reos_water_pt.dat' % path_to_data
# Nadine 22 Sep 2015: Fifth column is entropy in kJ/g/K+offset
self.names = 'logrho', 'logt', 'logp', 'logu', 'logs' #, 'chit', 'chirho', 'gamma1'
self.data = np.genfromtxt(path, names=self.names, usecols=(0, 1, 2, 3, 4))
self.logpvals = np.unique(self.data['logp'])
self.logtvals = np.unique(self.data['logt'])
self.logpmin = min(self.logpvals)
self.logpmax = max(self.logpvals)
self.logtmin = min(self.logtvals)
self.logtmax = max(self.logtvals)
self.nptsp = len(self.logpvals)
self.nptst = len(self.logtvals)
self.logrho_on_pt = np.zeros((self.nptsp, self.nptst))
self.logu_on_pt = np.zeros((self.nptsp, self.nptst))
self.logs_on_pt = np.zeros((self.nptsp, self.nptst))
# self.chit_on_pt = np.zeros((self.nptsp, self.nptst))
# self.chirho_on_pt = np.zeros((self.nptsp, self.nptst))
# self.gamma1_on_pt = np.zeros((self.nptsp, self.nptst))
for i, logpval in enumerate(self.logpvals):
data_this_logp = self.data[self.data['logp'] == logpval]
for j, logtval in enumerate(self.logtvals):
data_this_logp_logt = data_this_logp[data_this_logp['logt'] == logtval]
self.logrho_on_pt[i, j] = data_this_logp_logt['logrho']
self.logu_on_pt[i, j] = data_this_logp_logt['logu']
self.logs_on_pt[i, j] = data_this_logp_logt['logs']
# self.chit_on_pt[i, j] = data_this_logp_logt['chit']
# self.chirho_on_pt[i, j] = data_this_logp_logt['chirho']
# self.gamma1_on_pt[i, j] = data_this_logp_logt['gamma1']
pt_basis = (self.logpvals, self.logtvals)
self._get_logrho = RegularGridInterpolator(pt_basis, self.logrho_on_pt, bounds_error=False)
self._get_logu = RegularGridInterpolator(pt_basis, self.logu_on_pt)
self._get_logs = RegularGridInterpolator(pt_basis, self.logs_on_pt)
# self._get_chit = RegularGridInterpolator(pt_basis, self.chit_on_pt)
# self._get_chirho = RegularGridInterpolator(pt_basis, self.chirho_on_pt)
# self._get_gamma1 = RegularGridInterpolator(pt_basis, self.gamma1_on_pt)
def get_logrho(self, logp, logt):
return self._get_logrho((logp, logt))
def get_logu(self, logp, logt):
return self._get_logu((logp, logt))
def get_logs(self, logp, logt):
return self._get_logs((logp, logt)) # + 10. # kJ/g/K to erg/g/K
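# Minimal usage sketch (as comments); assumes reos_water_pt.dat is available
# under $ongp_data_path and that the queried (logp, logt) pair lies inside the
# tabulated grid -- the values below are placeholders, pick a point in range:
#   >>> water = eos()
#   >>> water.get_logrho(12.0, 4.0)
#   >>> water.get_logs(12.0, 4.0)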
| gpl-2.0 |
wangqingbaidu/aliMusic | analysis/artist_14_15_16.py | 1 | 4331 | # -*- coding: UTF-8 -*-
'''
Authorized by vlon Jang
Created on Jun 19, 2016
Email:[email protected]
From Institute of Computing Technology
All Rights Reserved.
'''
import pandas as pd
import numpy as np
import pymysql
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
mysql_cn= pymysql.connect(host='10.25.0.118', port=3306,user='root', passwd='111111', db='music')
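# get_min_error_res below returns sum(1/r) / sum(1/r**2) over the daily play
# counts r (values < 1 are skipped). This is the constant prediction p that
# minimizes the relative squared error sum_r ((p - r) / r)**2: setting the
# derivative 2 * sum_r (p/r - 1) / r to zero gives p * sum(1/r**2) = sum(1/r).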
def get_min_error_res(play_res):
res_sum = 0
res_sum_2 = 0
for res in play_res:
if res < 1: continue
res_sum += 1.0/res
res_sum_2 += 1.0/(res*res)
if res_sum == 0: return 0
return res_sum / res_sum_2
def get_min_error_mean_results(queryType = None):
"""
queryType: 14, 15 or 16 -- length in days of the artist_play window ending 20150830 (columns: artist_id, plays, ds)
"""
assert queryType
keys = ['artist_id', 'times', 'ds']
artist = {}
# data = pd.read_csv(in_filename, header = None, names = keys)
data = None
if queryType == 16:
data = pd.read_sql('''
select artist_id, plays, ds from artist_play where ds >= '20150815' and ds <= '20150830'
''', mysql_cn)
elif queryType == 15:
data = pd.read_sql('''
select artist_id, plays, ds from artist_play where ds >= '20150816' and ds <= '20150830'
''', mysql_cn)
elif queryType == 14:
data = pd.read_sql('''
select artist_id, plays, ds from artist_play where ds >= '20150817' and ds <= '20150830'
''', mysql_cn)
else:
assert False
data.columns = keys
days = set()
for _, row in data.iterrows():
artist_id = row[keys[0]]
if artist_id not in artist:
artist[artist_id] = []
artist[artist_id].append(row[keys[1]])
days.add(row[keys[2]])
days = [day for day in days]
sorted(days)
results = []
for artist_id, times in artist.iteritems():
min_error_res = int(get_min_error_res(times))
for day in days:
results.append([artist_id, min_error_res, day])
df = pd.DataFrame(results)
df.columns = ['artist_id', 'plays', 'ds']
df.pop('ds')
df = df.groupby(by='artist_id')['plays'].mean().reset_index()
df.columns = ['artist_id', 'plays']
df = df.sort_values(by = 'artist_id')
return df
def analysis():
avg_14 = pd.read_sql('''
SELECT artist_id, avg(plays) as plays FROM artist_play
WHERE ds >= '20150817' AND ds <= '20150830'
GROUP BY artist_id
order by artist_id;
''', mysql_cn)
avg_15 = pd.read_sql('''
SELECT artist_id, avg(plays) as plays FROM artist_play
WHERE ds >= '20150816' AND ds <= '20150830'
GROUP BY artist_id
order by artist_id;
''', mysql_cn)
avg_16 = pd.read_sql('''
SELECT artist_id, avg(plays) as plays FROM artist_play
WHERE ds >= '20150815' AND ds <= '20150830'
GROUP BY artist_id
order by artist_id;
''', mysql_cn)
print avg_14.iloc[[74,78]]
me_14 = get_min_error_mean_results(14)
me_15 = get_min_error_mean_results(15)
me_16 = get_min_error_mean_results(16)
dropIndex = [78, 74]
avg_14 = avg_14.drop(dropIndex)
me_14 = me_14.drop(dropIndex)
avg_15 = avg_15.drop(dropIndex)
me_15 = me_15.drop(dropIndex)
avg_16 = avg_16.drop(dropIndex)
me_16 = me_16.drop(dropIndex)
avg = [avg_14, avg_15, avg_16]
me = [me_14, me_15, me_16]
s = [avg_15, me_14]
s = [me_14['plays']/ x['plays'] for x in s]
x = pd.DataFrame(me_14['plays']/ avg_15['plays'])
print x
x.columns = ['plays']
x = x.sort_values(by = 'plays')
print x.iloc[49]
df_show = pd.concat(s, axis = 1, ignore_index=True)
df_show.columns = ['me_14',
# 'me_15',
# 'me_16',
'avg_14',
# 'avg_15',
# 'avg_16'
]
# df_show.columns = ['avg_14', 'me_14']
df_show.plot()
plt.show()
# fig = plt.gcf()
# fig.savefig('./img/compare_plays.png')
mysql_cn.close()
if __name__ == '__main__':
analysis()
| gpl-3.0 |
SXBK/kaggle | mercedes-benz/stack-0.56.py | 1 | 6009 | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np
from sklearn.base import BaseEstimator,TransformerMixin, ClassifierMixin
from sklearn.preprocessing import LabelEncoder
import xgboost as xgb
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import ElasticNetCV, LassoLarsCV
from sklearn.ensemble import GradientBoostingRegressor, ExtraTreesRegressor, RandomForestRegressor
from sklearn.pipeline import make_pipeline, make_union
from sklearn.utils import check_array
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
from sklearn.decomposition import PCA, FastICA
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import r2_score
class StackingEstimator(BaseEstimator, TransformerMixin):
def __init__(self, estimator):
self.estimator = estimator
def fit(self, X, y=None, **fit_params):
self.estimator.fit(X, y, **fit_params)
return self
def transform(self, X):
X = check_array(X)
X_transformed = np.copy(X)
# add class probabilities as a synthetic feature
if issubclass(self.estimator.__class__, ClassifierMixin) and hasattr(self.estimator, 'predict_proba'):
X_transformed = np.hstack((self.estimator.predict_proba(X), X))
# add class prediction as a synthetic feature
X_transformed = np.hstack((np.reshape(self.estimator.predict(X), (-1, 1)), X_transformed))
return X_transformed
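# A small illustration of StackingEstimator (sketch; the wrapped regressor is
# arbitrary): transform() prepends one column holding the model's predictions.
#   >>> est = StackingEstimator(DecisionTreeRegressor(max_depth=2))
#   >>> X_demo = np.random.rand(10, 3); y_demo = np.random.rand(10)
#   >>> est.fit(X_demo, y_demo).transform(X_demo).shape
#   (10, 4)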
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
for c in train.columns:
if train[c].dtype == 'object':
lbl = LabelEncoder()
lbl.fit(list(train[c].values) + list(test[c].values))
train[c] = lbl.transform(list(train[c].values))
test[c] = lbl.transform(list(test[c].values))
n_comp = 12
# tSVD
tsvd = TruncatedSVD(n_components=n_comp, random_state=420)
tsvd_results_train = tsvd.fit_transform(train.drop(["y"], axis=1))
tsvd_results_test = tsvd.transform(test)
# PCA
pca = PCA(n_components=n_comp, random_state=420)
pca2_results_train = pca.fit_transform(train.drop(["y"], axis=1))
pca2_results_test = pca.transform(test)
# ICA
ica = FastICA(n_components=n_comp, random_state=420)
ica2_results_train = ica.fit_transform(train.drop(["y"], axis=1))
ica2_results_test = ica.transform(test)
# GRP
grp = GaussianRandomProjection(n_components=n_comp, eps=0.1, random_state=420)
grp_results_train = grp.fit_transform(train.drop(["y"], axis=1))
grp_results_test = grp.transform(test)
# SRP
srp = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=420)
srp_results_train = srp.fit_transform(train.drop(["y"], axis=1))
srp_results_test = srp.transform(test)
#save columns list before adding the decomposition components
usable_columns = list(set(train.columns) - set(['y']))
# Append decomposition components to datasets
# After commenting out this block that appends the components, r2 drops and the score drops by 0.13
for i in range(1, n_comp + 1):
train['pca_' + str(i)] = pca2_results_train[:, i - 1]
test['pca_' + str(i)] = pca2_results_test[:, i - 1]
train['ica_' + str(i)] = ica2_results_train[:, i - 1]
test['ica_' + str(i)] = ica2_results_test[:, i - 1]
train['tsvd_' + str(i)] = tsvd_results_train[:, i - 1]
test['tsvd_' + str(i)] = tsvd_results_test[:, i - 1]
train['grp_' + str(i)] = grp_results_train[:, i - 1]
test['grp_' + str(i)] = grp_results_test[:, i - 1]
train['srp_' + str(i)] = srp_results_train[:, i - 1]
test['srp_' + str(i)] = srp_results_test[:, i - 1]
#usable_columns = list(set(train.columns) - set(['y']))
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID'].values
#finaltrainset and finaltestset are data to be used only the stacked model (does not contain PCA, SVD... arrays)
finaltrainset = train[usable_columns].values
finaltestset = test[usable_columns].values
'''Train the xgb model then predict the test data'''
xgb_params = {
'n_trees': 520,
'eta': 0.0045,
'max_depth': 4,
'subsample': 0.93,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'base_score': y_mean, # base prediction = mean(target)
'silent': 1
}
# NOTE: Make sure that the class is labeled 'class' in the data file
dtrain = xgb.DMatrix(train.drop('y', axis=1), y_train)
dtest = xgb.DMatrix(test)
num_boost_rounds = 1250
# train model
model = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round=num_boost_rounds)
y_pred = model.predict(dtest)
'''Train the stacked models then predict the test data'''
stacked_pipeline = make_pipeline(
StackingEstimator(estimator=LassoLarsCV(normalize=True)),
StackingEstimator(estimator=GradientBoostingRegressor(learning_rate=0.001, loss="huber", max_depth=3, max_features=0.55, min_samples_leaf=18, min_samples_split=14, subsample=0.7)),
LassoLarsCV(),
#StackingEstimator(RandomForestRegressor(max_depth=5, min_samples_split=4, n_estimators=20)),
#StackingEstimator(ExtraTreesRegressor(n_estimators=20, n_jobs=-1, criterion='mse')),
#LassoLarsCV(),
)
stacked_pipeline.fit(finaltrainset, y_train)
results = stacked_pipeline.predict(finaltestset)
'''R2 Score on the entire Train data when averaging'''
print('R2 score on train data:')
print(r2_score(y_train,stacked_pipeline.predict(finaltrainset)*0.2855 + model.predict(dtrain)*0.7145))
'''Average the predictions of both models on the test data, then save them to a csv file'''
sub = pd.DataFrame()
sub['ID'] = id_test
sub['y'] = y_pred*0.75 + results*0.25
sub.to_csv('stacked-models.csv', index=False)
# Any results you write to the current directory are saved as output.
| gpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/tests/test_backend_pdf.py | 10 | 2894 | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import numpy as np
from matplotlib import cm, rcParams
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import (image_comparison, knownfailureif,
cleanup)
if 'TRAVIS' not in os.environ:
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
@cleanup
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
@cleanup
def test_multipage_pagecount():
from matplotlib.backends.backend_pdf import PdfPages
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
@cleanup
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
### test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test that an empty pdf deletes itself afterwards when keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
### test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
| mit |
ojengwa/sympy | sympy/plotting/plot.py | 3 | 62386 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- only_integers : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
def append(self, *args):
"""Adds one more graph to the figure."""
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series.append(*args)
else:
self._series.append(Series(*args))
def extend(self, arg):
"""Adds the series from another plot or a list of series."""
if isinstance(arg, Plot):
self._series.extend(arg._series)
else:
self._series.extend(arg)
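# Minimal usage sketch for assembling a Plot by hand (the plot() helpers
# defined further below wrap this pattern; showing needs a working matplotlib
# backend):
#   >>> from sympy.abc import x
#   >>> p = Plot(LineOver1DRangeSeries(x**2, (x, -3, 3)))
#   >>> p.title = 'x**2'
#   >>> p.show()    # doctest: +SKIP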
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a sympy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
#Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
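# Illustrative sketch (not part of sympy; ``_surface_color_sketch`` is a
# hypothetical helper): ``surface_color`` may be a callable, and
# ``get_color_array`` above inspects its arity to decide which coordinates to
# feed it -- a two-argument function receives the (x, y) face centers of the
# mesh.  ``x`` and ``y`` are assumed to be the sympy Symbols used in the
# surface expression.
def _surface_color_sketch(x, y):
    series = SurfaceOver2DRangeSeries(x * y, (x, -1, 1), (y, -1, 1),
                                      surface_color=lambda a, b: a ** 2 + b ** 2)
    return series.get_color_array()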
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
    #The code is mostly a repetition of SurfaceOver2DRangeSeries.
    #XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
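# Illustrative sketch (not part of sympy; ``_contour_series_sketch`` is a
# hypothetical helper): how a backend would consume ``ContourSeries.get_meshes``
# -- the three returned arrays map directly onto matplotlib's
# ``contour(X, Y, Z)`` signature.  ``expr``, ``range_x`` and ``range_y`` follow
# the same conventions as the constructor above.
def _contour_series_sketch(expr, range_x, range_y):
    matplotlib = import_module('matplotlib',
        __import__kwargs={'fromlist': ['pyplot']})
    series = ContourSeries(expr, range_x, range_y)
    mesh_x, mesh_y, mesh_z = series.get_meshes()
    matplotlib.pyplot.contour(mesh_x, mesh_y, mesh_z)
    return matplotlib.pyplot.gcf()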
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(True)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
self.ax.legend()
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
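# Illustrative check (not part of sympy; ``_centers_sketch`` is a hypothetical
# helper): ``centers_of_segments`` maps N sample values to the N - 1 midpoints
# between consecutive values, and ``centers_of_faces`` maps an (N, M) mesh to
# an (N - 1, M - 1) array of per-face values, which is the shape expected by
# the face color arrays.
def _centers_sketch():
    np = import_module('numpy')
    midpoints = centers_of_segments(np.array([0.0, 1.0, 3.0]))  # [0.5, 2.0]
    faces = centers_of_faces(np.arange(6.0).reshape(2, 3))      # shape (1, 2)
    return midpoints, faces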
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
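# Quick numeric illustration (not part of sympy; ``_flat_sketch`` is a
# hypothetical helper) of the collinearity test above: for three points on a
# straight line the angle at the middle point is ~180 degrees, so cos(theta)
# is close to -1 and the test passes; moving the third point off the line
# makes it fail.
def _flat_sketch():
    np = import_module('numpy')
    x = np.array([0.0, 0.0])
    y = np.array([1.0, 1.0])
    assert flat(x, y, np.array([2.0, 2.0]))
    assert not flat(x, y, np.array([2.0, 5.0]))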
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
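# Illustrative sketch (not part of sympy; ``_matplotlib_list_sketch`` and the
# ``Interval`` namedtuple are stand-ins): ``_matplotlib_list`` expects objects
# exposing ``start`` and ``end`` attributes, as interval-arithmetic intervals
# do.  The ``None`` entries separate the rectangles so that matplotlib's
# ``fill`` draws them as disjoint polygons.
def _matplotlib_list_sketch():
    from collections import namedtuple
    Interval = namedtuple('Interval', ['start', 'end'])
    rectangles = [(Interval(0, 1), Interval(0, 2))]
    xlist, ylist = _matplotlib_list(rectangles)
    # xlist == [0, 0, 1, 1, None]; ylist == [0, 2, 2, 0, None]
    return xlist, ylist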
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence the
    same plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x - axis.
``ylabel`` : str. Label for the y - axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x - axis limits.
``ylim`` : tuple of two floats, denoting the y - axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the plot. The adaptive algorithm uses a random point near
    the midpoint of two points that has to be further sampled. Hence the same
    plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
    ``depth``: int. Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x - axis.
``ylabel`` : str. Label for the y - axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x - axis limits.
``ylim`` : tuple of two floats, denoting the y - axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x,y,u,v = symbols('x y u v')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set.union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle the case of expr_len == 3, since it is not
        # possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set.union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
ElBell/VTDairyDB | manage.py | 1 | 10171 | import sys, os
from main import app
from flask_script import Manager, Server, Command, Option
from flask_security.utils import encrypt_password
from models import db, populate_db, StatusData, GrowthData, LifeData, GrowthDataAverages
from main import app
import random
from datetime import date, datetime
import pandas
from tqdm import tqdm
from dateutil import parser
from sqlalchemy import desc
class ResetDB(Command):
"""Drops all tables and recreates them"""
def run(self, **kwargs):
db.drop_all()
db.create_all()
class PopulateDB(Command):
option_list = (
Option('--file', '-f', dest='user_data_file', default='scripts/user_data.csv'),
)
"""Fills in predefined data into DB"""
def run(self, user_data_file, **kwargs):
print("Complete")
populate_db()
def parse_float(val):
try:
float(val)
return float(val)
except ValueError:
if val in (".", ""):
return None
print(val)
raise Exception
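# Quick illustration (not part of the original commands; ``_parse_float_examples``
# is a hypothetical helper): ``parse_float`` treats "." and "" as missing data
# and returns None for them; any other value must parse as a float.
def _parse_float_examples():
    assert parse_float("3.5") == 3.5
    assert parse_float(".") is None
    assert parse_float("") is None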
class ConvertAll(Command):
def run(self):
for animal in GrowthData.query.all():
animal.weight = animal.weight*0.453592 if isinstance(animal.weight, (int, float)) else None
animal.height = animal.height*2.54 if isinstance(animal.height, (int, float)) else None
animal.lifetime_adg = animal.lifetime_adg*0.453592 if isinstance(animal.lifetime_adg, (int, float)) else None
animal.monthly_adg = animal.monthly_adg*0.453592 if isinstance(animal.monthly_adg, (int, float)) else None
animal.monthly_height_change = (animal.monthly_height_change/10) * 25.4 if isinstance(animal.monthly_height_change, (int, float)) else None
db.session.commit()
print("GrowthData converted")
for animal in LifeData.query.all():
animal.bwt = animal.bwt*0.453592 if isinstance(animal.bwt, (int, float)) else None
db.session.commit()
print("LifeData converted")
class ProcessLifeData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/lifeData.csv'),
)
def run(self, full_filename):
data = pandas.read_csv(full_filename)
data = data.dropna()
# Removes the first two rows of not data
data = data.ix[4:]
# Labels the columns as follows (so columns MUST BE IN THIS ORDER)
data.columns = ['FID', 'EID', 'Breed', 'DOB']
#app.logger.info(data)
for index, row in data.iterrows():
life = LifeData.query.filter_by(fid=row['FID']).first()
if life is None:
life = LifeData(fid=row['FID'], eid=row['EID'], breed=row['Breed'], dob=parser.parse(row['DOB']))
db.session.add(life)
else:
life.dob=parser.parse(row['DOB'])
life.breed=row['Breed']
life.eid=row['EID']
# Add won't happen without it
db.session.commit()
class ProcessGrowthData(Command):
option_list = (
Option('--file', '-f', dest='full_filename', default='data/growthData.xlsx'),
)
def run(self, full_filename):
data = pandas.read_excel(full_filename)
data = data.set_index('Index')
status_data, growth_data_old, growth_data_new = data.ix[:, :6], data.ix[:, 6:158], data.ix[:, 158:]
# print(growth_data_old.index)
for index, row in tqdm(status_data.iterrows()):
status = StatusData(fid=int(index), status=row['Status'], status_date=None if pandas.isnull(row['Date']) else row['Date'])
db.session.add(status)
life = LifeData.query.filter_by(fid=int(index)).first()
if life is None:
life = LifeData(fid=int(index), bwt=row['BWt'], dob=row['Birthdate'], breed=row['Brd'], estimate=True if type(row['Estimate']) is unicode else False)
db.session.add(life)
else:
if life.bwt is None:
life.bwt = row['BWt']
life.dob = row['Birthdate']
life.breed = row['Brd']
life.estimate = True if type(row['Estimate']) is unicode else False
db.session.commit()
growth_data_old.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_old.columns])
for row_name, row in tqdm(growth_data_old.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
date = weight_data['D'] if type(weight_data['D']) != pandas.tslib.NaTType else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
# print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), date=date, weight=weight, height=parse_float(height) if height is not None else height, location=location)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
growth_data_new.columns = pandas.MultiIndex.from_tuples([(c[:-1], c[-1]) for c in growth_data_new.columns])
for row_name, row in tqdm(growth_data_new.iterrows()):
row = row.where((pandas.notnull(row)), None)
for date_name, weight_data in row.unstack().iterrows():
date = datetime.strptime(date_name, '%y%m%d').date()
weight = weight_data['W'] if type(weight_data['W']) == int or type(weight_data['W']) == float else None
location = weight_data['L'] if type(weight_data['L']) != pandas.tslib.NaTType else None
bcs = weight_data['C']
# print(type(bcs))
height = weight_data['H'] if type(weight_data['H']) != pandas.tslib.NaTType else None
#print(row_name, weight, date, location, height)
if weight is None:
continue
measurement = GrowthData.new(fid=int(row_name), bcs=parse_float(bcs) if bcs is not None else bcs, location=location, date=date, weight=weight, height=parse_float(height) if height is not None else height)
db.session.add(measurement)
# print("Adding weighing "+str(row_name)+", "+date_name+":", weight_data['C'], weight_data.get('D', date_name), weight_data['L'], weight_data['W'], weight_data['H'])
db.session.commit()
class CalculateGrowthAverageData(Command):
def run(self):
fids = db.session.query(GrowthData.fid).distinct()
for fid in tqdm(fids):
fid_data = db.session.query(GrowthData).filter(GrowthData.fid == fid.fid).order_by(desc(GrowthData.date)).all()
today = fid_data[0]
growth_averages = GrowthDataAverages.query.filter_by(fid=int(fid.fid)).first()
life_data = LifeData.query.filter_by(fid=int(fid.fid)).first()
if len(fid_data) > 1:
previous = fid_data[1]
time_dif = today.date - previous.date
time_dif = time_dif.days
monthly_weight_dif = float(today.weight - previous.weight)
monthly_adg = float(monthly_weight_dif/time_dif)
if previous.height is not None and today.height is not None:
monthly_height_dif = float(today.height - previous.height)
monthly_height_change = float(monthly_height_dif/time_dif)
else:
monthly_height_change = None
age = today.date - life_data.dob
age = age.days
lifetime_weight_dif = float(today.weight - life_data.bwt)
lifetime_adg = float(lifetime_weight_dif/age)
if growth_averages is None:
growth_averages = GrowthDataAverages(fid=int(fid.fid), most_recent_date=today.date, monthly_adg=monthly_adg, age=age, lifetime_adg=lifetime_adg, monthly_height_change=monthly_height_change)
db.session.add(growth_averages)
else:
growth_averages.most_recent_date = today.date
growth_averages.monthly_adg = monthly_adg
growth_averages.age = age
growth_averages.lifetime_adg = lifetime_adg
growth_averages.monthly_height_change = monthly_height_change
else:
time_dif = 0
db.session.commit()
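# Illustrative helper (not used by the commands above; the name is
# hypothetical): average daily gain (ADG) is simply the weight difference
# divided by the days elapsed, e.g. an animal going from 150 kg to 165 kg in
# 30 days has a monthly ADG of 0.5 kg/day; the lifetime figure uses birth
# weight and age in days instead.
def average_daily_gain(start_weight, end_weight, days):
    if days <= 0:
        raise ValueError("days must be positive")
    return float(end_weight - start_weight) / days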
class DisplayDB(Command):
def run(self, **kwargs):
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay3 import create_schema_graph
connection = app.config['SQLALCHEMY_DATABASE_URI']
filename='dbschema.png'
graph = create_schema_graph(metadata=MetaData(connection),
show_datatypes=False, # The image would get nasty big if we'd show the datatypes
show_indexes=False, # ditto for indexes
rankdir='BT', # From left to right (instead of top to bottom)
font='Helvetica',
concentrate=False # Don't try to join the relation lines together
)
graph.write_png(filename) # write out the file
manager = Manager(app)
# Server commands context
#manager.add_command("secure", Server(ssl_context=context))
# Database Commands
manager.add_command("reset_db", ResetDB())
manager.add_command("populate_db", PopulateDB())
manager.add_command("display_db", DisplayDB())
manager.add_command("process_growth_data", ProcessGrowthData())
manager.add_command("process_life_data", ProcessLifeData())
manager.add_command("convert_all", ConvertAll())
manager.add_command("calculate_growth_averages", CalculateGrowthAverageData())
if __name__ == "__main__":
manager.run()
| gpl-3.0 |
amandersillinois/landlab | tests/components/species_evolution/test_zone_objects.py | 3 | 14279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for SpeciesEvolver zone objects."""
import numpy as np
import pandas as pd
import pytest
from landlab import RasterModelGrid
from landlab.components import SpeciesEvolver
from landlab.components.species_evolution import ZoneController, ZoneTaxon, zone as zn
@pytest.fixture()
def zone_example_grid():
mg = RasterModelGrid((5, 7), 2)
z = mg.add_zeros("node", "topographic__elevation")
return mg, z
def zone_func(grid):
z = grid.at_node["topographic__elevation"]
return z == 1
def zone_func_with_vars(grid, var1, var2):
z = grid.at_node["topographic__elevation"]
return np.all([z == 1, grid.x_of_node > var1, grid.y_of_node > var2], 0)
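# Illustrative sketch (not part of the test suite; ``_example_zone_mask`` is a
# hypothetical helper): the zone functions above must return a boolean mask
# over grid nodes, and ZoneController groups the True nodes into contiguous
# zones.
def _example_zone_mask():
    grid = RasterModelGrid((3, 4))
    elevation = grid.add_zeros("node", "topographic__elevation")
    elevation[[5, 6]] = 1
    return elevation == 1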
def test_none_to_none(zone_example_grid):
mg, z = zone_example_grid
sc = ZoneController(mg, zone_func)
sc.run_one_step(1)
np.testing.assert_array_equal(len(sc.zones), 0)
def test_none_to_one(zone_example_grid):
mg, z = zone_example_grid
# No zones exist at time 0.
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
np.testing.assert_equal(len(sc.zones), 0)
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [0],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
# Create a zone for time 1.
z[[9, 10, 11, 12]] = 1
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 1)
np.testing.assert_equal(sc.zones[0]._conn_type, zn.Connection.NONE_TO_ONE)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [0, 1],
"fragmentations": [np.nan, 0],
"captures": [np.nan, 0],
"area_captured_sum": [np.nan, 0],
"area_captured_max": [np.nan, 0],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(se.record_data_frame.taxa.sum(), 0)
def test_one_to_none(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
np.testing.assert_equal(len(sc.zones), 1)
zone = sc.zones[0]
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [1],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
# No zones for time 1.
z[:] = 0
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 0)
np.testing.assert_equal(zone._conn_type, zn.Connection.ONE_TO_NONE)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [1, 0],
"fragmentations": [np.nan, 0],
"captures": [np.nan, 0],
"area_captured_sum": [np.nan, 0],
"area_captured_max": [np.nan, 0],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(se.record_data_frame.taxa.sum(), 1)
def test_one_to_one(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
np.testing.assert_equal(len(sc.zones), 1)
zone = sc.zones[0]
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [1],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
# Modify elevation, although there is still one zone in time 1.
z[[11, 12]] = 0
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 1)
np.testing.assert_equal(zone, sc.zones[0])
np.testing.assert_equal(sc.zones[0]._conn_type, zn.Connection.ONE_TO_ONE)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [1, 1],
"fragmentations": [np.nan, 0],
"captures": [np.nan, 0],
"area_captured_sum": [np.nan, 0],
"area_captured_max": [np.nan, 0],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects(time=1)), 1)
def test_one_to_many(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [1],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects(time=0)), 1)
# Break the zone in two for time 1.
z[11] = 0
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 2)
np.testing.assert_equal(
set([z._conn_type for z in sc.zones]), set([None, zn.Connection.ONE_TO_MANY])
)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [1, 2],
"fragmentations": [np.nan, 2],
"captures": [np.nan, 0],
"area_captured_sum": [np.nan, 0],
"area_captured_max": [np.nan, 0],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects()), 2)
def test_many_to_one(zone_example_grid):
mg, z = zone_example_grid
# Create two zones for time 0.
z[[8, 9, 10, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [2],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects(time=0)), 2)
# Modify elevation such that two zones each overlap the original two zones.
z[11] = 1
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 1)
np.testing.assert_equal(sc.zones[0]._conn_type, zn.Connection.MANY_TO_ONE)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [2, 1],
"fragmentations": [np.nan, 0],
"captures": [np.nan, 1],
"area_captured_sum": [np.nan, 12],
"area_captured_max": [np.nan, 12],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects(time=1)), 2)
def test_many_to_many(zone_example_grid):
mg, z = zone_example_grid
# Create two zones for time 0.
z[[10, 12, 17, 19, 24, 26]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
expected_df = pd.DataFrame(
{
"time": [0],
"zones": [2],
"fragmentations": [np.nan],
"captures": [np.nan],
"area_captured_sum": [np.nan],
"area_captured_max": [np.nan],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects(time=0)), 2)
# Modify elevation such that two zones each overlap the original two zones.
z[[17, 19]] = 0
z[[11, 25]] = 1
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(sc.zones), 2)
for zone in sc.zones:
np.testing.assert_equal(zone._conn_type, zn.Connection.MANY_TO_MANY)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"zones": [2, 2],
"fragmentations": [np.nan, 0],
"captures": [np.nan, 2],
"area_captured_sum": [np.nan, 24],
"area_captured_max": [np.nan, 12],
}
)
pd.testing.assert_frame_equal(sc.record_data_frame, expected_df, check_like=True)
np.testing.assert_equal(len(se.get_extant_taxon_objects()), 4)
def test_one_to_many_to_one(zone_example_grid):
mg, z = zone_example_grid
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1, time_to_allopatric_speciation=1)
se.track_taxa(taxa)
z[11] = 0
sc.run_one_step(1)
se.run_one_step(1)
z[11] = 1
sc.run_one_step(1)
se.run_one_step(1)
np.testing.assert_equal(len(se.get_extant_taxon_objects()), 1)
def test_min_area(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
z[[9, 11, 12]] = 1
sc = ZoneController(mg, zone_func, minimum_area=5)
np.testing.assert_equal(len(sc.zones), 1)
def test_neighborhood_structure(zone_example_grid):
mg, z = zone_example_grid
z[[10, 16]] = 1
sc = ZoneController(mg, zone_func)
np.testing.assert_equal(len(sc.zones), 1)
sc = ZoneController(mg, zone_func, neighborhood_structure="D4")
np.testing.assert_equal(len(sc.zones), 2)
np.testing.assert_raises(
ValueError, ZoneController, mg, zone_func, neighborhood_structure="D"
)
def test_zone_func_kwargs(zone_example_grid):
mg, z = zone_example_grid
z[mg.core_nodes] = 1
sc = ZoneController(mg, zone_func_with_vars, var1=1, var2=2)
expected_mask = np.array(
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
True,
True,
True,
True,
False,
False,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
False,
False,
False,
]
)
np.testing.assert_array_equal(sc.zones[0].mask, expected_mask)
z[-2:] = 0
expected_mask[-2:] = False
sc.run_one_step(10)
np.testing.assert_array_equal(sc.zones[0].mask, expected_mask)
def test_zone_taxon_range_mask(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
ids = [9, 10, 11, 12]
z[ids] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1)
se.track_taxa(taxa)
expected_mask = np.zeros(mg.number_of_nodes, bool)
expected_mask[ids] = True
np.testing.assert_array_equal(taxa[0].range_mask, expected_mask)
# Remove extent so taxa range mask is all False.
z[ids] = 0
sc.run_one_step(1)
se.run_one_step(1)
expected_mask = np.zeros(mg.number_of_nodes, bool)
np.testing.assert_array_equal(taxa[0].range_mask, expected_mask)
def test_time_to_allopatric_speciation(zone_example_grid):
mg, z = zone_example_grid
# Create a zone for time 0.
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1, time_to_allopatric_speciation=20)
se.track_taxa(taxa)
z[[11]] = 0
while len(se.taxa_data_frame) == 1:
sc.run_one_step(10)
se.run_one_step(10)
expected_df = pd.DataFrame(
{
"pid": [np.nan, 0],
"type": 2 * [ZoneTaxon.__name__],
"t_first": [0, 30],
"t_final": 2 * [np.nan],
},
index=[0, 1],
)
expected_df.index.name = "tid"
expected_df["pid"] = expected_df["pid"].astype("Int64")
expected_df["t_final"] = expected_df["t_final"].astype("Int64")
pd.testing.assert_frame_equal(se.taxa_data_frame, expected_df, check_like=True)
def test_pseudoextinction(zone_example_grid):
mg, z = zone_example_grid
z[[9, 10, 11, 12]] = 1
se = SpeciesEvolver(mg)
sc = ZoneController(mg, zone_func)
taxa = sc.populate_zones_uniformly(1, persists_post_speciation=False)
se.track_taxa(taxa)
z[11] = 0
sc.run_one_step(1)
se.run_one_step(1)
expected_df = pd.DataFrame(
{
"pid": [np.nan, 0, 0],
"type": 3 * [ZoneTaxon.__name__],
"t_first": [0, 1, 1],
"t_final": [1, np.nan, np.nan],
},
index=[0, 1, 2],
)
expected_df.index.name = "tid"
expected_df["pid"] = expected_df["pid"].astype("Int64")
expected_df["t_final"] = expected_df["t_final"].astype("Int64")
pd.testing.assert_frame_equal(se.taxa_data_frame, expected_df, check_like=True)
expected_df = pd.DataFrame(
{
"time": [0, 1],
"taxa": [1, 2],
"speciations": [np.nan, 2],
"extinctions": [np.nan, 0],
"pseudoextinctions": [np.nan, 1],
}
)
pd.testing.assert_frame_equal(se.record_data_frame, expected_df, check_like=True)
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Dynamic_Loading_Single_Foundation_System_Under_Compression/CoupledHardContact/k_1e-4/Plot_Current.py | 7 | 4125 | #!/usr/bin/env python
#!/usr/bin/python
import h5py
from matplotlib import pylab
import matplotlib.pylab as plt
import sys
from matplotlib.font_manager import FontProperties
import math
import numpy as np
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 30})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=28
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=28
Pore_Pressure = [];
Solid_Displacement_u = [];
Solid_Displacement_uPU = [];
Fluid_Displacement_uPU = [];
Time = [];
# Plot the figure. Add labels and titles.
plt.figure()
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Stress [Pa] ")
# Loading Stage
# #########################################################################
thefile = "Soil_Foundation_System_Surface_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
Pore_Pressure = np.append(Pore_Pressure,upU_p);
Solid_Displacement_u = np.append(Solid_Displacement_u , u_u);
Solid_Displacement_uPU = np.append(Solid_Displacement_uPU, upU_u);
Fluid_Displacement_uPU = np.append(Fluid_Displacement_uPU, upU_U);
Time = np.append(Time, times);
# Consolidation Stage
# #########################################################################
thefile = "Soil_Foundation_System_Consolidation.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = times[-1] + finput["time"][:]
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
Pore_Pressure = np.append(Pore_Pressure,upU_p);
Solid_Displacement_u = np.append(Solid_Displacement_u , u_u);
Solid_Displacement_uPU = np.append(Solid_Displacement_uPU, upU_u);
Fluid_Displacement_uPU = np.append(Fluid_Displacement_uPU, upU_U);
Time = np.append(Time, times);
##### Start Plotting
ax.semilogx(Time,Pore_Pressure,'b',linewidth=2,label=r'Pore Pressure $p$');
ax.hold(True);
# max_yticks = 5
# yloc = plt.MaxNLocator(max_yticks)
# ax.yaxis.set_major_locator(yloc)
# max_xticks = 5
# yloc = plt.MaxNLocator(max_xticks)
# ax.xaxis.set_major_locator(yloc)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.35),
ncol=2, fancybox=True, shadow=True, prop={'size': 24})
pylab.savefig("Current_Excess_Pore_Pressure.pdf", bbox_inches='tight')
##### Drainage Condition #####
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
ax.semilogx(Time,Solid_Displacement_uPU*1e8,'k',linewidth=3,label=r'$upU\_u$'); ax.hold(True);
ax.semilogx(Time,Fluid_Displacement_uPU*1e8,'b',linewidth=10,label=r'$upU\_U$'); ax.hold(True);
ax.semilogx(Time,Solid_Displacement_u*1e8,'r',linewidth=3,label=r'$u\_u$'); ax.hold(True);
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Displacement $\times 1e^{-8}$ [m] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.25),
ncol=4, fancybox=True, shadow=True, prop={'size': 24})
# max_yticks = 5
# yloc = plt.MaxNLocator(max_yticks)
# ax.yaxis.set_major_locator(yloc)
# max_xticks = 5
# yloc = plt.MaxNLocator(max_xticks)
# ax.xaxis.set_major_locator(yloc)
pylab.savefig("Current_Displacement_At_Interface", bbox_inches='tight')
# plt.show()
| cc0-1.0 |
nikitasingh981/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 103 | 2017 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
able to recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
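# One possible completion of the TASKs above (illustrative sketch only; the original
# skeleton intentionally leaves these blank for the reader to fill in):
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)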
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
isb-cgc/ISB-CGC-data-proc | tcga_etl_pipeline/maf/part2/transform.py | 1 | 11107 | """ Script to merge Oncotator file
"""
#### -*- coding: utf-8 -*-
import sys
import time
import pandas as pd
import json
import check_duplicates
from bigquery_etl.utils import gcutils
from bigquery_etl.extract.gcloud_wrapper import GcsConnector
from bigquery_etl.utils.logging_manager import configure_logging
from bigquery_etl.extract.utils import convert_file_to_dataframe
from bigquery_etl.transform.tools import cleanup_dataframe, remove_duplicates
from bigquery_etl.execution import process_manager
log_filename = 'etl_maf_part2.log'
log_name = 'etl_maf_part2.log'
log = configure_logging(log_name, log_filename)
#--------------------------------------
# Format Oncotator output before merging
#--------------------------------------
def format_oncotator_columns(df):
df.columns = map(lambda x: x.replace('1000', '_1000'), df.columns)
df.columns = map(lambda x: x.replace('gencode', 'GENCODE'), df.columns)
df.columns = map(lambda x: '_'.join([''.join(i[0].upper() + i[1:]) for i in x.split('_')]), df.columns)
# adjust columns
replace_columns = {
'Tumor_Sample_Barcode' : 'Tumor_AliquotBarcode'
,'Matched_Norm_Sample_Barcode' : 'Normal_AliquotBarcode'
,'Match_Norm_Seq_Allele1' : 'Normal_Seq_Allele1'
,'Match_Norm_Seq_Allele2' : 'Normal_Seq_Allele2'
,'Match_Norm_Validation_Allele1' : 'Normal_Validation_Allele1'
,'Match_Norm_Validation_Allele2' : 'Normal_Validation_Allele2'
,'Gc_Content' : 'GC_Content'
,'CDNA_Change' : 'cDNA_Change'
}
for i in replace_columns:
df.columns = map(lambda x: x.replace(i, replace_columns[i]), df.columns)
return df
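# Illustrative sketch of what the renaming above does (comments only, not executed):
#
#     df = pd.DataFrame(columns=['tumor_sample_barcode', 'gc_content', 'gencode_transcript_id'])
#     list(format_oncotator_columns(df).columns)
#     # -> ['Tumor_AliquotBarcode', 'GC_Content', 'GENCODE_Transcript_Id']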
#------------------------------------------
# this adds new columns and performs a few checks on them
#------------------------------------------
def add_columns(df, sample_code2letter, study):
## Add new columns
df['Tumor_SampleBarcode'] = df['Tumor_AliquotBarcode'].map(lambda x: '-'.join(x.split('-')[0:4]))
df['Tumor_ParticipantBarcode'] = df['Tumor_AliquotBarcode'].map(lambda x: '-'.join(x.split('-')[0:3]))
df['Tumor_SampleTypeLetterCode'] = df['Tumor_AliquotBarcode'].map(lambda x: sample_code2letter[x.split('-')[3][0:2]])
df['Normal_SampleBarcode'] = df['Normal_AliquotBarcode'].map(lambda x: '-'.join(x.split('-')[0:4]))
df['Normal_ParticipantBarcode'] = df['Normal_AliquotBarcode'].map(lambda x: '-'.join(x.split('-')[0:3]))
df['Normal_SampleTypeLetterCode'] = df['Normal_AliquotBarcode'].map(lambda x: sample_code2letter[x.split('-')[3][0:2]])
df['Center'] = df['Center'].map(lambda x: ';'.join(sorted(x.split(';'))) if ';' in x else x)
df['Study'] = study
df['NCBI_Build'] = 37
# ---------------------------------------------
# Checks
# ---------------------------------------------
# check patient_id
tumor_patient_id_bool = (df['Tumor_ParticipantBarcode'] == df['Normal_ParticipantBarcode'])
df = df[tumor_patient_id_bool]
if not df[~tumor_patient_id_bool].empty:
log.error('ERROR: did not find all tumors paired with normal samples')
raise ValueError('ERROR: did not find all tumors paired with normal samples')
# tumor barcode 14th character must be 0
tumor_sample_codes = map(lambda x: x.split('-')[3][0], df['Tumor_AliquotBarcode'])
if '0' not in tumor_sample_codes and len(tumor_sample_codes) > 0:
log.error('ERROR: tumor barcode 14th character must be 0')
raise ValueError('ERROR: tumor barcode 14th character must be 0')
# normal barcode 14th character must be 1
norm_sample_codes = map(lambda x: x.split('-')[3][0], df['Normal_AliquotBarcode'])
if '1' not in norm_sample_codes and len(norm_sample_codes) > 0:
log.error('ERROR: normal barcode 14th character must be 1')
raise ValueError('ERROR: normal barcode 14th character must be 1')
df['ParticipantBarcode'] = df['Tumor_ParticipantBarcode']
del df['Tumor_ParticipantBarcode']
del df['Normal_ParticipantBarcode']
return df
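# Illustrative sketch (comments only): for a hypothetical aliquot barcode such as
# 'TCGA-AB-1234-01A-11D-0001-01', the slicing above yields
#     SampleBarcode      -> 'TCGA-AB-1234-01A'  ('-'.join of the first 4 fields)
#     ParticipantBarcode -> 'TCGA-AB-1234'      ('-'.join of the first 3 fields)
# and the two-digit sample code '01' is looked up in sample_code2letter
# (e.g. mapping to 'TP' for primary tumor, depending on the configured mapping).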
#----------------------------------------
# this is the main function to process oncotator MAF files
# 1. this merges the different files by disease type
# 2. selects the columns to load into BigQuery
# 3. format the oncotator columns
# 4. adds new columns
# 5. removes any duplicate aliquots
#----------------------------------------
def process_oncotator_output(project_id, bucket_name, data_library, bq_columns, sample_code2letter, oncotator_object_path, oncotator_object_output_path):
study = data_library['Study'].iloc[0]
    # this is needed to stop pandas from converting these columns to FLOAT
dtype = {
"Transcript_Exon" : "object"
,"NCBI_Build" : "object"
,"COSMIC_Total_Alterations_In_Gene" : "object"
,"CCLE_ONCOMAP_Total_Mutations_In_Gene" : "object"
,"HGNC_HGNC_ID" : "object"
,"UniProt_AApos" : "object"
,"Transcript_Position" : "object"
,"HGNC_OMIM_ID_Supplied_By_NCBI" : "object"
}
file_count = 0
# create an empty dataframe. we use this to merge dataframe
disease_bigdata_df = pd.DataFrame()
# iterate over the selected files
for oncotator_file in data_library['filename']:
file_count+= 1
log.info('-'*10 + "{0}: Processing file {1}".format(file_count, oncotator_file) + '-'*10)
try:
            # convert the file to a dataframe
filename = oncotator_object_path + oncotator_file
gcs = GcsConnector(project_id, bucket_name)
log.info('%s: converting %s to dataframe' % (study, filename))
df = gcutils.convert_blob_to_dataframe(gcs, project_id, bucket_name, filename, log = log)
log.info('%s: done converting %s to dataframe' % (study, filename))
except RuntimeError as re:
log.warning('%s: problem cleaning dataframe for %s: %s' % (study, filename, re))
except Exception as e:
log.exception('%s: problem converting to dataframe for %s: %s' % (study, filename, e))
raise e
if df.empty:
log.warning('%s: empty dataframe for file: %s' % (study, oncotator_file))
continue
#------------------------------
# different operations on the frame
#------------------------------
# get only the required BigQuery columns
df = df[bq_columns]
# format oncotator columns; name changes etc
df = format_oncotator_columns(df)
# add new columns
df = add_columns(df, sample_code2letter, study)
disease_bigdata_df = disease_bigdata_df.append(df, ignore_index = True)
log.info('-'*10 + "{0}: Finished file({3}) {1}. rows: {2}".format(file_count, oncotator_file, len(df), study) + '-'*10)
# this is a merged dataframe
if not disease_bigdata_df.empty:
        # remove duplicates (various rules; see check_duplicates)
log.info('\tcalling check_duplicates to collapse aliquots with %s rows' % (len(disease_bigdata_df)))
disease_bigdata_df = check_duplicates.remove_maf_duplicates(disease_bigdata_df, sample_code2letter, log)
log.info('\tfinished check_duplicates to collapse aliquots with %s rows' % (len(disease_bigdata_df)))
# enforce unique mutation--previous
# unique_mutation = ['Chromosome', 'Start_Position', 'End_Position', 'Tumor_Seq_Allele1', 'Tumor_Seq_Allele2', 'Tumor_AliquotBarcode']
# enforce unique mutation
unique_mutation = ['Hugo_Symbol', 'Entrez_Gene_Id', 'Chromosome', 'Start_Position', 'End_Position', 'Reference_Allele', 'Tumor_Seq_Allele1', 'Tumor_Seq_Allele2',
'Tumor_AliquotBarcode']
# merge mutations from multiple centers
log.info('\tconsolidate the centers for duplicate mutations into list for %s' % (study))
seencenters = set()
def concatcenters(df_group):
if len(df_group) > 1:
centers = list(set(df_group['Center'].tolist()))
uniquecenters = set()
delim = config['maf']['center_delim']
for center in centers:
fields = center.split(delim)
for field in fields:
uniquecenters.add(field)
sortedunique = delim.join(sorted(list(uniquecenters)))
df_group.loc[:,'Center'] = sortedunique
if sortedunique not in seencenters:
log.info('unique centers: %s' % sortedunique)
seencenters.add(sortedunique)
return df_group
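        # Illustrative sketch (comments only), assuming center_delim=';' in the config:
        # duplicate rows carrying Center values 'broad.mit.edu;bcgsc.ca' and 'bcgsc.ca'
        # all end up with Center == 'bcgsc.ca;broad.mit.edu' (the sorted union).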
disease_bigdata_df = disease_bigdata_df.groupby(unique_mutation).apply(concatcenters)
log.info('\tfinished consolidating centers for duplicate mutations for %s' % (study))
# enforce unique mutation
log.info('\tcalling remove_duplicates to collapse mutations with %s rows for %s' % (len(disease_bigdata_df), study))
disease_bigdata_df = remove_duplicates(disease_bigdata_df, unique_mutation)
log.info('\tfinished remove_duplicates to collapse mutations with %s rows for %s' % (len(disease_bigdata_df), study))
# convert the disease_bigdata_df to new-line JSON and upload the file
uploadpath = oncotator_object_output_path + "{0}.json".format(study)
log.info('%s: uploading %s to GCS' % (study, uploadpath))
gcs.convert_df_to_njson_and_upload(disease_bigdata_df, uploadpath)
log.info('%s: done uploading %s to GCS' % (study, uploadpath))
else:
log.warning('Empty dataframe for %s in %s!' % (oncotator_file, study))
return True
if __name__ == '__main__':
log.info('start maf part2 pipeline')
config = json.load(open(sys.argv[1]))
project_id = config['project_id']
bucket_name = config['buckets']['open']
sample_code2letter = config['sample_code2letter']
# get disease_codes/studies( TODO this must be changed to get the disease code from the file name)
df = convert_file_to_dataframe(open(sys.argv[2]))
df = cleanup_dataframe(df)
studies = list(set(df['Study'].tolist()))
    # get bq columns (this allows the user to select the columns
    # without worrying about the index, case sensitivity, etc.)
selected_columns = pd.read_table(sys.argv[3], names=['bq_columns'])
transposed = selected_columns.T
transposed.columns = transposed.loc['bq_columns']
transposed = cleanup_dataframe(transposed)
bq_columns = transposed.columns.values
# submit threads by disease code
pm = process_manager.ProcessManager(max_workers=33, db='maf.db', table='task_queue_status', log=log)
for idx, df_group in df.groupby(['Study']):
future = pm.submit(process_oncotator_output, project_id, bucket_name, df_group, bq_columns, sample_code2letter, config['maf']['oncotator_object_path'], config['maf']['oncotator_object_output_path'])
#process_oncotator_output( project_id, bucket_name, df_group, bq_columns, sample_code2letter)
time.sleep(0.2)
pm.start()
log.info('finished maf part2 pipeline')
| apache-2.0 |
bh107/bohrium | bridge/bh107/bh107/random.py | 2 | 19401 | # -*- coding: utf-8 -*-
import os
import datetime
import numpy as np
from . import bharray, _dtype_util, util
from .ufuncs import ufunc_dict
from bohrium_api import _bh_api
class RandomState:
def __init__(self, seed=None):
"""Container for the Random123 pseudo-random number generator.
`RandomState` exposes a number of methods for generating random numbers
drawn from a variety of probability distributions. In addition to the
distribution-specific arguments, each method takes a keyword argument
`size` that defaults to ``None``. If `size` is ``None``, then a single
value is generated and returned. If `size` is an integer, then a 1-D
array filled with generated values is returned. If `size` is a tuple,
then an array with that shape is filled and returned.
Parameters
----------
seed : int, optional
Random seed initializing the pseudo-random number generator.
Can be an integer or ``None`` (the default).
If `seed` is ``None``, then `RandomState` will try to read data from
``/dev/urandom`` (or the Windows analogue) if available or seed from
the clock otherwise.
"""
self.key = None
self.index = None
self.has_gauss = None
self.seed(seed)
def seed(self, seed=None):
"""Seed the generator.
This method is called when `RandomState` is initialized. It can be
called again to re-seed the generator. For details, see `RandomState`.
Parameters
----------
seed : int or array_like, optional
Seed for `RandomState`.
See Also
--------
RandomState
"""
if seed is None:
try:
self.key = np.uint32(hash(os.urandom(8)))
except NotImplementedError:
self.key = np.uint32(hash(datetime.datetime.now()))
else:
self.key = np.uint32(seed)
self.index = 0
self.has_gauss = False
def get_state(self):
"""Return a tuple representing the internal state of the generator.
For more details, see `set_state`.
Returns
-------
out : tuple(str, np.uint64, np.uint32)
The returned tuple has the following items:
1. the string 'Random123'.
2. an integer ``index``.
3. an integer ``key``.
See Also
--------
set_state
Notes
-----
`set_state` and `get_state` are not needed to work with any of the
random distributions in Bohrium. If the internal state is manually altered,
the user should know exactly what he/she is doing.
"""
return ('Random123', self.index, self.key)
def set_state(self, state):
"""Set the internal state of the generator from a tuple.
For use if one has reason to manually (re-)set the internal state of the
"Mersenne Twister"[1]_ pseudo-random number generating algorithm.
Parameters
----------
state : tuple(str, np.uint64, np.uint32)
The returned tuple has the following items:
1. the string 'Random123'.
2. an integer ``index``.
3. an integer ``key``.
Returns
-------
out : None
Returns 'None' on success.
See Also
--------
get_state
Notes
-----
`set_state` and `get_state` are not needed to work with any of the
random distributions in Bohrium. If the internal state is manually altered,
the user should know exactly what he/she is doing.
"""
if len(state) != 3:
raise ValueError("state must contain 3 elements")
if state[0] != 'Random123':
raise ValueError("algorithm must be 'Random123'")
try:
self.index = state[1]
self.key = state[2]
except TypeError:
raise ValueError("state is not a valid Random123 state")
def random123(self, shape):
"""New array of uniform pseudo numbers based on the random123 philox2x32 algorithm.
NB: dtype is np.uint64 always
Parameters
----------
shape : int or tuple of ints
Defines the shape of the returned array of random floats.
Returns
-------
out : Array of uniform pseudo numbers
"""
if np.isscalar(shape):
shape = (shape,)
length = util.total_size(shape)
flat = bharray.BhArray(length, np.uint64)
if length > 0:
_bh_api.random123(flat._bhc_handle, self.index, self.key)
self.index += flat.nelem
return flat.reshape(shape)
def random_sample(self, shape):
"""Return random floats in the half-open interval [0.0, 1.0).
Results are from the "continuous uniform" distribution over the
stated interval. To sample :math:`Unif[a, b), b > a` multiply
the output of `random_sample` by `(b-a)` and add `a`::
(b - a) * random() + a
Parameters
----------
shape : int or tuple of ints
Defines the shape of the returned array of random floats.
Returns
-------
out : BhArray of floats
Array of random floats of shape `shape`.
Examples
--------
>>> np.random.random((5,))
array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428])
Three-by-two array of random numbers from [-5, 0):
>>> 5 * np.random.random((3, 2)) - 5
array([[-3.99149989, -0.52338984],
[-2.99091858, -0.79479508],
[-1.23204345, -1.75224494]])
"""
# Generate random numbers as uint
r_uint = self.random123(shape)
# Convert random numbers to float in the interval [0.0, 1.0) and return.
return r_uint.astype(np.float64) / np.iinfo(np.uint64).max
def randint(self, low, high=None, shape=None):
"""Return random integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution in the
"half-open" interval [`low`, `high`). If `high` is None (the default),
then results are from [0, `low`).
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution (unless
``high=None``, in which case this parameter is the *highest* such
integer).
high : int, optional
If provided, one above the largest (signed) integer to be drawn
from the distribution (see above for behavior if ``high=None``).
shape : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : BhArray of ints
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
See Also
--------
random.random_integers : similar to `randint`, only for the closed
interval [`low`, `high`], and 1 is the lowest value if `high` is
omitted. In particular, this other one is the one to use to generate
uniformly distributed discrete non-integers.
Examples
--------
>>> np.random.randint(2, size=10)
array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
>>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
>>> np.random.randint(5, size=(2, 4))
array([[4, 0, 2, 1],
[3, 2, 2, 0]])
"""
if high is None:
high = low
low = 0
if low >= high:
raise ValueError("low >= high")
diff = high - low
return self.random123(shape) % diff + low
def uniform(self, low=0.0, high=1.0, shape=None):
"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : BhArray
Drawn samples, with shape `shape`.
See Also
--------
randint : Discrete uniform distribution, yielding integers.
random_integers : Discrete uniform distribution over the closed
interval ``[low, high]``.
random_sample : Floats uniformly distributed over ``[0, 1)``.
random : Alias for `random_sample`.
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
Notes
-----
The probability density function of the uniform distribution is
.. math:: p(x) = \\frac{1}{b - a}
anywhere within the interval ``[a, b)``, and zero elsewhere.
same as:
random_sample(size) * (high - low) + low
Examples
--------
Draw samples from the distribution:
>>> s = np.random.uniform(-1,0,1000)
All values are within the given interval:
>>> np.all(s >= -1)
True
>>> np.all(s < 0)
True
Display the histogram of the samples, along with the
probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 15, normed=True)
>>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
>>> plt.show()
"""
return self.random_sample(shape).astype(np.float64) * (high - low) + low
def rand(self, *shape):
"""Random values in a given shape.
        Create an array of the given shape and populate it with
random samples from a uniform distribution
over ``[0, 1)``.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned array, should all be positive.
If no argument is given a single Python float is returned.
Returns
-------
out : BhArray, shape ``(d0, d1, ..., dn)``
Random values.
See Also
--------
random
Notes
-----
This is a convenience function. If you want an interface that
takes a shape-tuple as the first argument, refer to
np.random.random_sample .
Examples
--------
>>> np.random.rand(3,2)
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
"""
return self.random_sample(shape)
def random_integers(self, low, high=None, shape=None):
"""Return random integers between `low` and `high`, inclusive.
Return random integers from the "discrete uniform" distribution in the
closed interval [`low`, `high`]. If `high` is None (the default),
then results are from [1, `low`].
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution (unless
``high=None``, in which case this parameter is the *highest* such
integer).
high : int, optional
If provided, the largest (signed) integer to be drawn from the
distribution (see above for behavior if ``high=None``).
shape : tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : BhArray of ints
`size`-shaped array of random integers from the appropriate
distribution.
See Also
--------
random.randint : Similar to `random_integers`, only for the half-open
interval [`low`, `high`), and 0 is the lowest value if `high` is
omitted.
Notes
-----
To sample from N evenly spaced floating-point numbers between a and b,
use::
a + (b - a) * (bh107.random.random_integers(N) - 1) / (N - 1.)
Examples
--------
>>> np.random.random_integers(5)
4
>>> type(np.random.random_integers(5))
<type 'int'>
>>> np.random.random_integers(5, size=(3.,2.))
array([[5, 4],
[3, 3],
[4, 5]])
Choose five random numbers from the set of five evenly-spaced
numbers between 0 and 2.5, inclusive (*i.e.*, from the set
:math:`{0, 5/8, 10/8, 15/8, 20/8}`):
>>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.
array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ])
Roll two six sided dice 1000 times and sum the results:
>>> d1 = np.random.random_integers(1, 6, 1000)
>>> d2 = np.random.random_integers(1, 6, 1000)
>>> dsums = d1 + d2
Display results as a histogram:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(dsums, 11, normed=True)
>>> plt.show()
"""
if high is None:
high = low
low = 1
return self.randint(low, high + 1, shape)
def standard_exponential(self, shape=None):
""" Draw samples from the standard exponential distribution.
`standard_exponential` is identical to the exponential distribution
with a scale parameter of 1.
Parameters
----------
shape : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
out : BhArray
Drawn samples.
Examples
--------
Output a 3x8000 array:
>>> n = np.random.standard_exponential((3, 8000))
"""
        # We use -log(1-U) since U is [0, 1)
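        # (Inverse-transform sampling: if U ~ Uniform[0, 1), then -log(1 - U) follows the
        # standard exponential distribution, whose CDF is F(x) = 1 - exp(-x).)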
return -1 * ufunc_dict['log'](1 - self.random_sample(shape))
def exponential(self, scale=1.0, shape=None):
""" Exponential distribution.
Its probability density function is
.. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
The rate parameter is an alternative, widely used parameterization
of the exponential distribution [3]_.
The exponential distribution is a continuous analogue of the
geometric distribution. It describes many common situations, such as
the size of raindrops measured over many rainstorms [1]_, or the time
between page requests to Wikipedia [2]_.
Parameters
----------
scale : float
The scale parameter, :math:`\\beta = 1/\\lambda`.
shape : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn.
Returns
-------
out : BhArray
Drawn samples.
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
Random Signal Principles", 4th ed, 2001, p. 57.
.. [2] "Poisson Process", Wikipedia,
http://en.wikipedia.org/wiki/Poisson_process
.. [3] "Exponential Distribution, Wikipedia,
http://en.wikipedia.org/wiki/Exponential_distribution
"""
if scale <= 0:
raise ValueError("The `scale` must be greater than zero")
return self.standard_exponential(shape) * scale
def random(self, shape=None):
"""Return random floats in the half-open interval [0.0, 1.0).
Alias for `random_sample`
"""
return self.random_sample(shape)
def sample(self, shape=None):
"""Return random floats in the half-open interval [0.0, 1.0).
Alias for `random_sample`
"""
return self.random_sample(shape)
def ranf(self, shape=None):
"""Return random floats in the half-open interval [0.0, 1.0).
Alias for `random_sample`
"""
return self.random_sample(shape)
def random_of_dtype(self, dtype, shape=None):
"""Return random array of `dtype`. The values are in the interval of the `dtype`.
Parameters
----------
dtype : data-type
The desired data-type for the array.
shape : int or tuple of ints
Defines the shape of the returned array of random floats.
Returns
-------
out : BhArray of floats
Array of random floats of shape `shape`.
"""
dtype = _dtype_util.obj_to_dtype(dtype)
if dtype is np.bool:
res = self.random_integers(0, 1, shape)
elif dtype in [np.int8, np.uint8]:
res = self.random_integers(1, 3, shape)
elif dtype is np.int16:
res = self.random_integers(1, 5, shape)
elif dtype is np.uint16:
res = self.random_integers(1, 6, shape)
elif dtype in [np.float32, np.float64]:
res = self.random_sample(shape)
elif dtype in [np.complex64, np.complex128]:
res = self.random_sample(shape=shape) + self.random_sample(shape=shape) * 1j
else:
res = self.random_integers(1, 8, shape)
        if len(res.shape) == 0: # Make sure scalars are arrays.
res = bharray.BhArray.from_object(res)
res.shape = shape
return res.astype(dtype)
# The default random object
_inst = RandomState()
seed = _inst.seed
get_state = _inst.get_state
set_state = _inst.set_state
random_sample = _inst.random_sample
random = _inst.random
sample = _inst.sample
ranf = _inst.ranf
randint = _inst.randint
uniform = _inst.uniform
rand = _inst.rand
random_integers = _inst.random_integers
standard_exponential = _inst.standard_exponential
exponential = _inst.exponential
random_of_dtype = _inst.random_of_dtype
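# Minimal usage sketch (comments only; assumes the bh107/bohrium_api stack is installed):
#
#     from bh107 import random
#     random.seed(42)
#     u = random.rand(3, 4)                  # uniform floats in [0.0, 1.0)
#     k = random.randint(0, 10, shape=(5,))  # integers in [0, 10)
#     e = random.exponential(2.0, (5,))      # exponential samples with scale 2.0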
| apache-2.0 |
IshankGulati/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 11 | 10179 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils.fixes import bincount
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
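# Illustrative usage sketch (comments only):
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=100, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     silhouette_score(X, labels)  # close to 1 for well-separated blobs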
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
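# Illustrative note (comments only): with X and labels as in the silhouette sketch above,
# calinski_harabaz_score(X, labels) returns a ratio that grows as clusters become denser
# and better separated; there is no fixed upper bound.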
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py | 34 | 2606 | # -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000 #1000 #with 1000, OLS and AdditiveModel agree in params at 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*2, x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
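# exog stacks the per-variable polynomial terms column-wise:
# [x1^0, x1^1, x1^2, x1^3, x2^0, x2^1, x2^2, x2^3], giving an (nobs, 8) design matrix;
# the duplicate constant column (x2^0, index order+1) is dropped below to form exog_reduced.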
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
m = AdditiveModel(d)
m.fit(y)
y_pred = m.results.predict(d)
for ss in m.smoothers:
print(ss.params)
res_ols = OLS(y, exog_reduced).fit()
print(res_ols.params)
#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exog)
y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(y, '.', alpha=0.25)
plt.plot(y_true, 'k-', label='true')
    plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=.7)  # alpha must be in [0, 1]
plt.plot(y_pred, 'r-', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel ' + ii)
counter += 1
plt.show() | bsd-3-clause |
alongwithyou/auto-sklearn | autosklearn/models/models.py | 5 | 10413 | import numpy as np
import scipy as sp
from sklearn.linear_model import Ridge, RidgeClassifier, LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, BaggingClassifier, BaggingRegressor, RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from autosklearn.constants import *
import operator
import copy
class MyAutoML:
    ''' Rough sketch of a class that "solves" the AutoML problem. We illustrate how the various types of data that will be encountered in the challenge can be handled.
Also, we make sure that the model regularly outputs predictions on validation and test data, such that, if the execution of the program is interrupted (timeout)
there are still results provided by the program. The baseline methods chosen are not optimized and do not provide particularly good results.
In particular, no special effort was put into dealing with missing values and categorical variables.
The constructor selects a model based on the data information passed as argument. This is a form of model selection "filter".
We anticipate that the participants may compute a wider range of statistics to perform filter model selection.
    We also anticipate that the participants will conduct cross-validation experiments to further select among various models
    and hyper-parameters of the model. They might walk through "model space" systematically (e.g. with grid search), heuristically (e.g. with greedy strategies),
or stochastically (random walks). This example does not bother doing that. We simply use a growing ensemble of models to improve predictions over time.
We use ensemble methods that vote on an increasing number of classifiers. For efficiency, we use WARM START that re-uses
already trained base predictors, when available.
IMPORTANT: This is just a "toy" example:
- if was checked only on the phase 0 data at the time of release
- not all cases are considered
- this could easily break on datasets from further phases
- this is very inefficient (most ensembles_statistics have no "warm start" option, hence we do a lot of unnecessary calculations)
- there is no preprocessing
'''
def __init__(self, info, verbose=True, debug_mode=False):
self.label_num=info['label_num']
self.target_num=info['target_num']
self.task = info['task']
self.metric = info['metric']
self.postprocessor = None
self.seed = 1
#self.postprocessor = MultiLabelEnsemble(LogisticRegression(), balance=True) # To calibrate proba
self.postprocessor = MultiLabelEnsemble(LogisticRegression(), balance=False) # To calibrate proba
if debug_mode>=2:
self.name = "RandomPredictor"
self.model = RandomPredictor(self.target_num)
self.predict_method = self.model.predict_proba
return
if info['task']==REGRESSION:
if info['is_sparse']==True:
self.name = "BaggingRidgeRegressor"
self.model = BaggingRegressor(base_estimator=Ridge(), n_estimators=1, verbose=verbose, random_state=self.seed) # unfortunately, no warm start...
else:
self.name = "GradientBoostingRegressor"
self.model = GradientBoostingRegressor(n_estimators=1, verbose=verbose, warm_start = True, random_state=self.seed)
self.predict_method = self.model.predict # Always predict probabilities
else:
            if info['has_categorical']: # Out of laziness, we do not convert categorical variables...
self.name = "RandomForestClassifier"
self.model = RandomForestClassifier(n_estimators=1, verbose=verbose, random_state=self.seed) # unfortunately, no warm start...
elif info['is_sparse']:
self.name = "BaggingNBClassifier"
self.model = BaggingClassifier(base_estimator=BernoulliNB(), n_estimators=1, verbose=verbose, random_state=self.seed) # unfortunately, no warm start...
else:
self.name = "GradientBoostingClassifier"
self.model = eval(self.name + "(n_estimators=1, verbose=" + str(verbose) + ", min_samples_split=10, random_state=1, warm_start = True)")
if info['task']==MULTILABEL_CLASSIFICATION:
self.model = MultiLabelEnsemble(self.model)
self.predict_method = self.model.predict_proba
def __repr__(self):
return "MyAutoML : " + self.name
def __str__(self):
return "MyAutoML : \n" + str(self.model)
def fit(self, X, Y):
self.model.fit(X,Y)
# Train a calibration model postprocessor
if self.task != REGRESSION and self.postprocessor!=None:
Yhat = self.predict_method(X)
self.postprocessor.fit(Yhat, Y)
return self
def predict(self, X):
prediction = self.predict_method(X)
# Calibrate proba
if self.task != REGRESSION and self.postprocessor!=None:
prediction = self.postprocessor.predict_proba(prediction)
        # Keep only the 2nd column because the 1st one is 1 minus the 2nd
if self.target_num==1 and len(prediction.shape)>1 and prediction.shape[1]>1:
prediction = prediction[:,1]
# Make sure the normalization is correct
if self.task==MULTICLASS_CLASSIFICATION:
eps = 1e-15
norma = np.sum(prediction, axis=1)
for k in range(prediction.shape[0]):
prediction[k,:] /= sp.maximum(norma[k], eps)
return prediction
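# Minimal usage sketch (comments only; the `info` dict below is hypothetical but uses
# the keys read by the constructor above):
#
#     info = {'task': MULTICLASS_CLASSIFICATION, 'metric': 'acc_metric',
#             'label_num': 3, 'target_num': 3, 'is_sparse': False,
#             'has_categorical': False}
#     model = MyAutoML(info, verbose=False)
#     model.fit(X_train, y_train)
#     predictions = model.predict(X_valid)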
class MultiLabelEnsemble:
''' MultiLabelEnsemble(predictorInstance, balance=False)
Like OneVsRestClassifier: Wrapping class to train multiple models when
several objectives are given as target values. Its predictor may be an ensemble.
This class can be used to create a one-vs-rest classifier from multiple 0/1 labels
to treat a multi-label problem or to create a one-vs-rest classifier from
a categorical target variable.
Arguments:
predictorInstance -- A predictor instance is passed as argument (be careful, you must instantiate
the predictor class before passing the argument, i.e. end with (),
e.g. LogisticRegression().
balance -- True/False. If True, attempts to re-balance classes in training data
by including a random sample (without replacement) s.t. the largest class has at most 2 times
the number of elements of the smallest one.
Example Usage: mymodel = MultiLabelEnsemble (GradientBoostingClassifier(), True)'''
def __init__(self, predictorInstance, balance=False):
self.predictors = [predictorInstance]
self.n_label = 1
self.n_target = 1
        self.n_estimators = 1 # for predictors that are ensembles of estimators
self.balance=balance
def __repr__(self):
return "MultiLabelEnsemble"
def __str__(self):
return "MultiLabelEnsemble : \n" + "\tn_label={}\n".format(self.n_label) + "\tn_target={}\n".format(self.n_target) + "\tn_estimators={}\n".format(self.n_estimators) + str(self.predictors[0])
def fit(self, X, Y):
if len(Y.shape)==1:
Y = np.array([Y]).transpose() # Transform vector into column matrix
# This is NOT what we want: Y = Y.reshape( -1, 1 ), because Y.shape[1] out of range
self.n_target = Y.shape[1] # Num target values = num col of Y
self.n_label = len(set(Y.ravel())) # Num labels = num classes (categories of categorical var if n_target=1 or n_target if labels are binary )
# Create the right number of copies of the predictor instance
if len(self.predictors)!=self.n_target:
predictorInstance = self.predictors[0]
self.predictors = [predictorInstance]
for i in range(1,self.n_target):
self.predictors.append(copy.copy(predictorInstance))
# Fit all predictors
np.random.seed(1)
for i in range(self.n_target):
            # Update the number of desired predictors
if hasattr(self.predictors[i], 'n_estimators'):
self.predictors[i].n_estimators=self.n_estimators
# Subsample if desired
if self.balance:
pos = Y[:,i]>0
neg = Y[:,i]<=0
if sum(pos)<sum(neg):
chosen = pos
not_chosen = neg
else:
chosen = neg
not_chosen = pos
num = sum(chosen)
                # indices of the not-chosen (majority) class, as a shuffleable array
                idx = np.flatnonzero(not_chosen)
np.random.shuffle(idx)
chosen[idx[0:min(num, len(idx))]]=True
# Train with chosen samples
self.predictors[i].fit(X[chosen,:],Y[chosen,i])
else:
self.predictors[i].fit(X,Y[:,i])
return
def predict_proba(self, X):
prediction = self.predictors[0].predict_proba(X)
if self.n_label==2: # Keep only 1 prediction, 1st column = (1 - 2nd column)
prediction = prediction[:,1]
for i in range(1,self.n_target): # More than 1 target, we assume that labels are binary
new_prediction = self.predictors[i].predict_proba(X)[:,1]
prediction = np.column_stack((prediction, new_prediction))
return prediction
class RandomPredictor:
''' Make random predictions.'''
def __init__(self, target_num):
self.target_num=target_num
return
def __repr__(self):
return "RandomPredictor"
def __str__(self):
return "RandomPredictor"
def fit(self, X, Y):
if len(Y.shape)>1:
assert(self.target_num==Y.shape[1])
return self
def predict_proba(self, X):
prediction = np.random.rand(X.shape[0],self.target_num)
return prediction
| bsd-3-clause |
glouppe/scikit-learn | sklearn/tests/test_multiclass.py | 14 | 21802 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
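# A minimal sketch of the parameter-routing convention used in the grid
# search above: the 'estimator__C' key addresses the C parameter of the
# classifier wrapped by the meta-estimator. The helper name is invented;
# only the standard set_params/get_params API is assumed.
def _example_nested_param_routing():
    from sklearn.multiclass import OneVsRestClassifier
    from sklearn.svm import LinearSVC
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    # GridSearchCV uses exactly this double-underscore syntax internally.
    ovr.set_params(estimator__C=0.5)
    assert ovr.estimator.C == 0.5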
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# Test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
INCF/pybids | bids/variables/collections.py | 1 | 23588 | """Classes and functions related to the management of sets of BIDSVariables."""
from copy import copy
import warnings
import re
from collections import OrderedDict
from itertools import chain
import fnmatch
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from .variables import (SparseRunVariable, SimpleVariable, DenseRunVariable,
merge_variables, BIDSVariable)
from bids.utils import listify, matches_entities
class BIDSVariableCollection(object):
"""A container for one or more variables extracted from variable files
at a single level of analysis.
Parameters
----------
variables : list
A list of BIDSVariables or SimpleVariables.
Notes
-----
Variables in the list must all share the same analysis level, which
must be one of 'session', 'subject', or 'dataset' level. For
run-level Variables, use the BIDSRunVariableCollection.
"""
def __init__(self, variables):
if not variables:
raise ValueError("No variables were provided")
SOURCE_TO_LEVEL = {
'events': 'run',
'physio': 'run',
'stim': 'run',
'regressors': 'run',
'scans': 'session',
'sessions': 'subject',
'participants': 'dataset'
}
var_levels = set([SOURCE_TO_LEVEL[v.source] if v.source in
SOURCE_TO_LEVEL else v.source for v in variables])
# TODO: relax this requirement & allow implicit merging between levels
if len(var_levels) > 1:
raise ValueError("A Collection cannot be initialized from "
"variables at more than one level of analysis. "
"Levels found in input variables: %s" %
var_levels)
elif not var_levels:
raise ValueError(
"None of the provided variables matched any of the known levels, which are: %s"
% (', '.join(sorted(SOURCE_TO_LEVEL.values())))
)
self.level = list(var_levels)[0]
variables = self.merge_variables(variables)
self.variables = {v.name: v for v in variables}
self._index_entities()
# Container for variable groups (see BIDS-StatsModels spec)--maps from
# group names to lists of variables.
self.groups = {}
@staticmethod
def merge_variables(variables, **kwargs):
"""Concatenates Variables along row axis.
Parameters
----------
variables : list
List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns
-------
list
A list of Variables.
"""
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs)
for vars_ in list(var_dict.values())]
def to_df(self, variables=None, format='wide', fillna=np.nan,
entities=True, timing=True):
"""Merge variables into a single pandas DataFrame.
Parameters
----------
variables : list of str or BIDSVariable
Optional list of variables or variable names to retain. If strings
are passed, each one gives the name of a variable in the current
collection. If BIDSVariables are passed, they will be used as-is.
If None, all variables are returned. Strings and BIDSVariables
cannot be mixed in the list.
format : {'wide', 'long'}
Whether to return a DataFrame in 'wide' or 'long' format. In 'wide'
format, each row is defined by a unique entity combination, and
each variable is in a separate column. In 'long' format, each row
is a unique combination of entities and variable names, and a
single 'amplitude' column provides the value.
fillna : value
Replace missing values with the specified value.
entities : bool
Whether or not to include a column for each entity.
timing : bool
Whether or not to include onset and duration columns.
Returns
-------
:obj:`pandas.DataFrame`
A pandas DataFrame.
"""
if variables is None:
variables = list(self.variables.keys())
# Can receive already-selected Variables from sub-classes
if not isinstance(variables[0], BIDSVariable):
variables = [v for v in self.variables.values()
if v.name in variables]
# Convert all variables to separate DFs.
# Note: bad things can happen if we pass the conditions, entities, and
# timing flags through to the individual variables and then do
# concat/reshaping operations. So instead, we set them all to True
# temporarily, do what we need to, then drop them later if needed.
dfs = [v.to_df(True, True, timing=True) for v in variables]
# Always concatenate along row axis (for format='wide', we'll pivot).
df = pd.concat(dfs, axis=0, sort=True)
ind_cols = list(set(df.columns) - {'condition', 'amplitude'})
ent_cols = list(set(ind_cols) - {'onset', 'duration'})
if format == 'long':
df = df.reset_index(drop=True).fillna(fillna)
else:
df['amplitude'] = df['amplitude'].fillna('n/a')
df = df.pivot_table(index=ind_cols, columns='condition',
values='amplitude', aggfunc='first')
df = df.reset_index().replace('n/a', fillna)
df.columns.name = None
# Drop any columns we don't want
if not timing:
df.drop(['onset', 'duration'], axis=1, inplace=True)
if not entities:
df.drop(ent_cols, axis=1, inplace=True)
return df
@classmethod
def from_df(cls, data, entities=None, source='contrast'):
"""Create a Collection from a pandas DataFrame.
Parameters
----------
df : :obj:`pandas.DataFrame`
The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities : :obj:`pandas.DataFrame`
An optional second DataFrame containing
entity information.
source : str
The value to set as the source for all Variables.
Returns
-------
BIDSVariableCollection
"""
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=['amplitude'])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables)
def clone(self):
"""Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
"""
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone
def _index_entities(self):
"""Sets current instance's entities based on the existing index.
Notes
-----
Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
"""
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()])
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
def __getitem__(self, var):
if var in self.variables:
return self.variables[var]
keys = list(self.variables.keys())
raise ValueError("No variable named '{}' found in this collection. "
"Available names are {}.".format(var, keys))
def __setitem__(self, var, obj):
# Ensure name matches collection key, but raise warning if needed.
if obj.name != var:
warnings.warn("The provided key to use in the collection ('%s') "
"does not match the passed Column object's existing "
"name ('%s'). The Column name will be set to match "
"the provided key." % (var, obj.name))
obj.name = var
self.variables[var] = obj
def match_variables(self, pattern, return_type='name', match_type='unix'):
"""Return columns whose names match the provided pattern.
Parameters
----------
pattern : str, list
One or more patterns to match variable names against, interpreted as
regular expressions or unix-style globs depending on match_type.
return_type : {'name', 'variable'}
What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
match_type : str
Matching approach to use. Either 'regex' (full-blown regular
expression matching) or 'unix' (unix-style pattern matching
via the fnmatch module).
Returns
-------
A list of all matching variables or variable names
"""
pattern = listify(pattern)
results = []
for patt in pattern:
if match_type.lower().startswith('re'):
patt = re.compile(patt)
vars_ = [v for v in self.variables.keys() if patt.search(v)]
else:
vars_ = fnmatch.filter(list(self.variables.keys()), patt)
if return_type.startswith('var'):
vars_ = [self.variables[v] for v in vars_]
results.extend(vars_)
return results
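# A brief usage sketch for the collection API defined above (from_df, to_df,
# match_variables). The column names and values are invented for illustration,
# and the exact set of output columns may vary with the SimpleVariable
# implementation this class relies on.
def _example_variable_collection():
    import pandas as pd
    data = pd.DataFrame({'rt': [0.8, 0.9, 1.1], 'accuracy': [1, 0, 1]})
    entities = pd.DataFrame({'subject': ['01', '01', '01'], 'run': [1, 2, 3]})
    coll = BIDSVariableCollection.from_df(data, entities=entities)
    long_df = coll.to_df(format='long')   # one row per variable/entity combination
    wide_df = coll.to_df(format='wide')   # one column per variable
    matches = coll.match_variables('acc*')  # unix-style match, e.g. ['accuracy']
    return long_df, wide_df, matches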
class BIDSRunVariableCollection(BIDSVariableCollection):
"""A container for one or more RunVariables--i.e., Variables that have a
temporal dimension.
Parameters
----------
variables : list
A list of SparseRunVariable and/or DenseRunVariable.
sampling_rate : float
Sampling rate (in Hz) to use when working with
dense representations of variables. If None, defaults to 10.
Notes
-----
Variables in the list must all be at the 'run' level. For other
levels (session, subject, or dataset), use the
BIDSVariableCollection.
"""
def __init__(self, variables, sampling_rate=None):
# Don't put the default value in signature because None is passed from
# several places and we don't want multiple conflicting defaults.
if sampling_rate:
if isinstance(sampling_rate, str):
raise ValueError("Sampling rate must be numeric.")
self.sampling_rate = sampling_rate or 10
super(BIDSRunVariableCollection, self).__init__(variables)
def get_dense_variables(self, variables=None):
"""Returns a list of all stored DenseRunVariables."""
if variables is None:
variables = set(self.variables.keys())
return [v for v in self.variables.values()
if isinstance(v, DenseRunVariable) and v.name in variables]
def get_sparse_variables(self, variables=None):
"""Returns a list of all stored SparseRunVariables."""
if variables is None:
variables = set(self.variables.keys())
return [v for v in self.variables.values()
if isinstance(v, SparseRunVariable) and v.name in variables]
def all_dense(self):
return len(self.get_dense_variables()) == len(self.variables)
def all_sparse(self):
return len(self.get_sparse_variables()) == len(self.variables)
def _get_sampling_rate(self, sampling_rate):
"""Parse sampling rate argument and return appropriate value."""
if sampling_rate is None:
return self.sampling_rate
if isinstance(sampling_rate, (float, int)):
return sampling_rate
if sampling_rate == 'TR':
trs = {var.run_info[0].tr for var in self.variables.values()}
if not trs:
raise ValueError("Repetition time unavailable; specify "
"sampling_rate in Hz explicitly or set to"
" 'highest'.")
elif len(trs) > 1:
raise ValueError("Non-unique Repetition times found "
"({!r}); specify sampling_rate explicitly"
.format(trs))
return 1. / trs.pop()
if sampling_rate.lower() == 'highest':
dense_vars = self.get_dense_variables()
# If no dense variables are available, fall back on instance SR
if not dense_vars:
return self.sampling_rate
var_srs = [v.sampling_rate for v in dense_vars]
if len(var_srs) == 1:
return var_srs[0]
return max(*var_srs)
raise ValueError("Invalid sampling_rate value '{}' provided. Must be "
"a float, None, 'TR', or 'highest'."
.format(sampling_rate))
def _densify_and_resample(self, sampling_rate=None, variables=None,
resample_dense=False, force_dense=False,
in_place=False, kind='linear'):
sampling_rate = self._get_sampling_rate(sampling_rate)
_dense, _sparse = [], []
# Filter variables and sort by class
for name, var in self.variables.items():
if variables is not None and name not in variables:
continue
if isinstance(var, DenseRunVariable):
_dense.append(var)
else:
_sparse.append(var)
_variables = {}
if force_dense:
for v in _sparse:
if is_numeric_dtype(v.values):
_variables[v.name] = v.to_dense(sampling_rate)
if resample_dense:
for v in _dense:
_variables[v.name] = v.resample(sampling_rate, kind=kind)
coll = self if in_place else self.clone()
if in_place:
coll.variables.update(_variables)
else:
coll.variables = _variables
coll.sampling_rate = sampling_rate
return coll
def to_dense(self, sampling_rate=None, variables=None, in_place=False,
kind='linear'):
""" Convert all contained SparseRunVariables to DenseRunVariables.
Parameters
----------
sampling_rate : None, {'TR', 'highest'}, float
Sampling rate to use when densifying sparse variables. If None,
uses the currently stored instance value. If 'TR', the repetition
time is used, if available, to select the sampling rate (1/TR).
If 'highest', all variables are resampled to the highest sampling
rate of any of the existing dense variables. The sampling rate may
also be specified explicitly in Hz as a float.
variables : list
Optional list of names of Variables to resample. If None, all
variables are resampled.
in_place : bool
When True, all variables are overwritten in-place.
When False, returns resampled versions of all variables.
kind : str
Argument to pass to scipy's interp1d; indicates the kind of
interpolation approach to use. See interp1d docs for valid values.
Returns
-------
A BIDSVariableCollection (if in_place is False).
Notes
-----
Categorical variables are ignored.
"""
return self._densify_and_resample(sampling_rate, variables,
resample_dense=False,
in_place=in_place, kind=kind,
force_dense=True)
def resample(self, sampling_rate=None, variables=None, force_dense=False,
in_place=False, kind='linear'):
"""Resample all dense variables (and optionally, sparse ones) to the
specified sampling rate.
Parameters
----------
sampling_rate : int or float
Target sampling rate (in Hz). If None, uses the instance value.
variables : list
Optional list of names of Variables to resample. If None, all
variables are resampled.
force_dense : bool
if True, all sparse variables will be forced to dense.
in_place : bool
When True, all variables are overwritten in-place.
When False, returns resampled versions of all variables.
kind : str
Argument to pass to scipy's interp1d; indicates the kind of
interpolation approach to use. See interp1d docs for valid values.
Returns
-------
A BIDSVariableCollection (if in_place is False).
"""
return self._densify_and_resample(sampling_rate, variables,
force_dense=force_dense, in_place=in_place,
kind=kind, resample_dense=True)
def to_df(self, variables=None, format='wide', fillna=np.nan,
sampling_rate='highest', include_sparse=True, include_dense=True,
entities=True, timing=True):
"""Merge columns into a single pandas DataFrame.
Parameters
----------
variables : list
Optional list of variable names to retain;
if None, all variables are written out.
format : str
Whether to return a DataFrame in 'wide' or 'long'
format. In 'wide' format, each row is defined by a unique
onset/duration, and each variable is in a separate column. In
'long' format, each row is a unique combination of onset,
duration, and variable name, and a single 'amplitude' column
provides the value.
fillna : value
Replace missing values with the specified value.
sampling_rate : float
Specifies the sampling rate to use for all variables in the event
that resampling needs to be performed (i.e., if some variables are
sparse, or if dense variables have different sampling rates). Must
be one of 'TR', 'highest', None, or a float (specifying the rate in
Hz). If None, uses the instance sampling rate (10 Hz by default).
include_sparse : bool
Whether or not to include sparse variables in the output.
include_dense : bool
Whether or not to include dense variables in the output.
entities : bool
Whether or not to include a column for each entity.
timing : bool
Whether or not to include onset and duration columns.
Returns
-------
:obj:`pandas.DataFrame`
A pandas DataFrame.
Notes
-----
The precise format of the resulting DataFrame depends on the variables
contained in the current instance. If all variables are sparse, the
output will also be sparse--i.e., the events in the DataFrame may have
non-uniform timing. If at least one dense variable is present, and the
user has not explicitly excluded dense variables (by setting
include_dense=False), all selected variables will be implicitly
converted to dense using the specified `sampling_rate` (if provided).
To avoid unexpected behavior, we recommend converting mixed collections
to all-dense form explicitly via the `to_dense()` or `resample()`
methods before calling `to_df()`.
"""
if not include_sparse and not include_dense:
raise ValueError("You can't exclude both dense and sparse "
"variables! That leaves nothing!")
_vars = []
if include_sparse:
_vars += self.get_sparse_variables(variables)
if include_dense:
_vars += self.get_dense_variables(variables)
if not _vars:
raise ValueError("No variables were selected for output.")
# If all variables are sparse/simple, we can pass them as-is. Otherwise
# we first force all variables to dense via .resample().
if all(isinstance(v, SimpleVariable) for v in _vars):
variables = _vars
else:
sampling_rate = sampling_rate or self.sampling_rate
var_names = [v.name for v in _vars]
collection = self.resample(sampling_rate, variables=var_names,
force_dense=include_sparse)
variables = list(collection.variables.values())
return super().to_df(variables, format, fillna, entities=entities,
timing=timing)
def merge_collections(collections, sampling_rate='highest', output_level=None):
"""Merge two or more collections at the same level of analysis.
Parameters
----------
collections : list
List of Collections to merge.
sampling_rate : int or str
Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'highest' (see
merge_variables docstring for further explanation).
output_level : str, optional
Assign a new level (e.g., 'run', 'subject', etc.) to the merged
collection. If None, the current level is retained.
Returns
-------
BIDSVariableCollection or BIDSRunVariableCollection
Result type depends on the type of the input collections.
"""
collections = listify(collections)
if len(collections) == 1:
return collections[0]
levels = set([c.level for c in collections])
if len(levels) > 1:
raise ValueError("At the moment, it's only possible to merge "
"Collections at the same level of analysis. You "
"passed collections at levels: %s." % levels)
variables = list(chain(*[c.variables.values() for c in collections]))
cls = collections[0].__class__
variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
if isinstance(collections[0], BIDSRunVariableCollection):
# 'auto' was renamed to 'highest' circa 0.10, but check for both
if sampling_rate in {'auto', 'highest'}:
rates = [var.sampling_rate for var in variables
if isinstance(var, DenseRunVariable)]
sampling_rate = rates[0] if rates else None
return cls(variables, sampling_rate)
# For non-run collections, we may need to set a different output level
coll = cls(variables)
if output_level is not None:
coll.level = output_level
return coll
| mit |
Omkar20895/climate | ocw/plotter.py | 5 | 48622 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import TemporaryFile
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib import rcParams, cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import ImageGrid
import scipy.stats.mstats as mstats
import numpy as np
import numpy.ma as ma
import ocw.utils as utils
# Set the default colormap to coolwarm
mpl.rc('image', cmap='coolwarm')
def set_cmap(name):
'''
Sets the default colormap (eg when setting cmap=None in a function)
See: http://matplotlib.org/examples/pylab_examples/show_colormaps.html
for a list of possible colormaps.
Appending '_r' to a matplotlib colormap name will give you a reversed
version of it.
:param name: The name of the colormap.
:type name: :mod:`string`
'''
# The first line is redundant but it prevents the user from setting
# the cmap rc value improperly
cmap = plt.get_cmap(name)
mpl.rc('image', cmap=cmap.name)
def _nice_intervals(data, nlevs):
'''
Purpose::
Calculates nice intervals between each color level for colorbars
and contour plots. The target minimum and maximum color levels are
calculated by taking the minimum and maximum of the distribution
after cutting off the tails to remove outliers.
Input::
data - an array of data to be plotted
nlevs - an int giving the target number of intervals
Output::
clevs - A list of floats for the resultant colorbar levels
'''
# Find the min and max levels by cutting off the tails of the distribution
# This mitigates the influence of outliers
data = data.ravel()
mn = mstats.scoreatpercentile(data, 5)
mx = mstats.scoreatpercentile(data, 95)
# if min less than 0 and or max more than 0 put 0 in center of color bar
if mn < 0 and mx > 0:
level = max(abs(mn), abs(mx))
mnlvl = -1 * level
mxlvl = level
# otherwise, span the color bar from the data min to the data max
else:
mnlvl = mn
mxlvl = mx
# hack to make generated intervals from mpl the same for all versions
autolimit_mode = mpl.rcParams.get('axes.autolimit_mode')
if autolimit_mode:
mpl.rc('axes', autolimit_mode='round_numbers')
locator = mpl.ticker.MaxNLocator(nlevs)
clevs = locator.tick_values(mnlvl, mxlvl)
if autolimit_mode:
mpl.rc('axes', autolimit_mode=autolimit_mode)
# Make sure the bounds of clevs are reasonable since sometimes
# MaxNLocator gives values outside the domain of the input data
clevs = clevs[(clevs >= mnlvl) & (clevs <= mxlvl)]
return clevs
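# A short illustration of the helper above, relying on the module-level numpy
# import: for a roughly standard-normal sample the 5th/95th percentiles have
# opposite signs, so the returned levels end up symmetric about zero.
def _example_nice_intervals():
    data = np.random.RandomState(0).randn(1000)
    clevs = _nice_intervals(data, 10)
    # clevs is an increasing array of 'round' boundaries covering roughly
    # the 5th-95th percentile range of the data.
    return clevs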
def _best_grid_shape(nplots, oldshape):
'''
Purpose::
Calculate a better grid shape in case the user enters more columns
and rows than needed to fit a given number of subplots.
Input::
nplots - an int giving the number of plots that will be made
oldshape - a tuple denoting the desired grid shape (nrows, ncols) for arranging
the subplots originally requested by the user.
Output::
newshape - the smallest possible subplot grid shape needed to fit nplots
'''
nrows, ncols = oldshape
size = nrows * ncols
diff = size - nplots
if diff < 0:
raise ValueError(
'gridshape=(%d, %d): Cannot fit enough subplots for data' % (nrows, ncols))
else:
# If the user enters an excessively large number of
# rows and columns for gridshape, automatically
# correct it so that it fits only as many plots
# as needed
while diff >= nrows:
ncols -= 1
size = nrows * ncols
diff = size - nplots
# Don't forget to remove unnecessary columns too
if ncols == 1:
nrows = nplots
newshape = nrows, ncols
return newshape
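# Worked examples of the correction above (values follow directly from the
# loop): surplus columns are trimmed first; if only one column remains, the
# row count collapses to the number of plots.
def _example_best_grid_shape():
    assert _best_grid_shape(4, (3, 3)) == (3, 2)
    assert _best_grid_shape(2, (3, 3)) == (2, 1)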
def _fig_size(gridshape, aspect=None):
'''
Purpose::
Calculates the figure dimensions from a subplot gridshape
Input::
gridshape - Tuple denoting the subplot gridshape
aspect - Float denoting approximate aspect ratio of each subplot
(width / height). Default is 8.5 / 5.5
Output::
width - float for width of the figure in inches
height - float for height of the figure in inches
'''
if aspect is None:
aspect = 8.5 / 5.5
# Default figure size is 8.5" x 5.5" if nrows == ncols
# and then adjusted by given aspect ratio
nrows, ncols = gridshape
if nrows >= ncols:
# If more rows keep width constant
width, height = (aspect * 5.5), 5.5 * (nrows // ncols)
else:
# If more columns keep height constant
width, height = (aspect * 5.5) * (ncols // nrows), 5.5
return width, height
def draw_taylor_diagram(results, names, refname, fname, fmt='png',
gridshape=(1, 1), ptitle='', subtitles=None,
pos='upper right', frameon=True, radmax=1.5, legend_size=13):
''' Draw a Taylor diagram.
:param results: An Nx2 array pairing each evaluated dataset's normalized
standard deviation with its correlation coefficient against the reference.
:type results: :class:`numpy.ndarray`
:param names: A list of names for each evaluated dataset
:type names: :class:`list` of :mod:`string`
:param refname: The name of the reference dataset.
:type refname: :mod:`string`
:param fname: The filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output plot.
:type fmt: :mod:`string`
:param gridshape: (Optional) Tuple denoting the desired grid shape
(num_rows, num_cols) for arranging the subplots.
:type gridshape: A :class:`tuple` of the form (num_rows, num_cols)
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param subtitles: (Optional) list of strings specifying the title for each
subplot.
:type subtitles: :class:`list` of :mod:`string`
:param pos: (Optional) string or tuple of floats used to set the position
of the legend. Check the `Matplotlib docs <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_
for additional information.
:type pos: :mod:`string` or :func:`tuple` of :class:`float`
:param frameon: (Optional) boolean specifying whether to draw a frame
around the legend box.
:type frameon: :class:`bool`
:param radmax: (Optional) float to adjust the extent of the axes in terms of
standard deviation.
:type radmax: :class:`float`
:param legend_size: (Optional) float to control the font size of the legend
:type legend_size: :class:`float`
'''
# Handle the single plot case.
if results.ndim == 2:
results = results.reshape(1, *results.shape)
# Make sure gridshape is compatible with input data
nplots = results.shape[0]
gridshape = _best_grid_shape(nplots, gridshape)
# Set up the figure
fig = plt.figure()
fig.set_size_inches((8.5, 11))
fig.dpi = 300
for i, data in enumerate(results):
rect = gridshape + (i + 1,)
# Convert rect to string form as expected by TaylorDiagram constructor
rect = str(rect[0]) + str(rect[1]) + str(rect[2])
# Create Taylor Diagram object
dia = TaylorDiagram(1, fig=fig, rect=rect,
label=refname, radmax=radmax)
for i, (stddev, corrcoef) in enumerate(data):
dia.add_sample(stddev, corrcoef, marker='$%d$' % (i + 1), ms=6,
label=names[i])
if subtitles is not None:
dia._ax.set_title(subtitles[i])
# Add legend
legend = fig.legend(dia.samplePoints,
[p.get_label() for p in dia.samplePoints],
handlelength=0., prop={'size': legend_size}, numpoints=1,
loc=pos)
legend.draw_frame(frameon)
plt.subplots_adjust(wspace=0)
# Add title and save the figure
fig.suptitle(ptitle)
plt.tight_layout(.05, .05)
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
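# A hedged usage sketch for draw_taylor_diagram. The statistics and model
# names below are invented; each row pairs a model's normalized standard
# deviation with its correlation against the reference dataset.
def _example_taylor_diagram():
    stats = np.array([[1.10, 0.92],
                      [0.85, 0.88],
                      [1.30, 0.75]])
    draw_taylor_diagram(stats, ['model_a', 'model_b', 'model_c'], 'reference',
                        'taylor_example', ptitle='Example Taylor diagram')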
def draw_subregions(subregions, lats, lons, fname, fmt='png', ptitle='',
parallels=None, meridians=None, subregion_masks=None):
''' Draw subregion domain(s) on a map.
:param subregions: The subregion objects to plot on the map.
:type subregions: :class:`list` of subregion objects (Bounds objects)
:param lats: Array of latitudes values.
:type lats: :class:`numpy.ndarray`
:param lons: Array of longitudes values.
:type lons: :class:`numpy.ndarray`
:param fname: The filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param parallels: (Optional) :class:`list` of :class:`int` or :class:`float` for the parallels to
be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
for additional information.
:type parallels: :class:`list` of :class:`int` or :class:`float`
:param meridians: (Optional) :class:`list` of :class:`int` or :class:`float` for the meridians to
be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
for additional information.
:type meridians: :class:`list` of :class:`int` or :class:`float`
:param subregion_masks: (Optional) :class:`dict` of :class:`bool` arrays for each
subregion for giving finer control of the domain to be drawn, by default
the entire domain is drawn.
:type subregion_masks: :class:`dict` of :class:`bool` arrays
'''
# Set up the figure
fig = plt.figure()
fig.set_size_inches((8.5, 11.))
fig.dpi = 300
ax = fig.add_subplot(111)
# Determine the map boundaries and construct a Basemap object
lonmin = lons.min()
lonmax = lons.max()
latmin = lats.min()
latmax = lats.max()
m = Basemap(projection='cyl', llcrnrlat=latmin, urcrnrlat=latmax,
llcrnrlon=lonmin, urcrnrlon=lonmax, resolution='l', ax=ax)
# Draw the borders for coastlines and countries
m.drawcoastlines(linewidth=1)
m.drawcountries(linewidth=.75)
m.drawstates()
# Create default meridians and parallels. The interval between
# them should be 1, 5, 10, 20, 30, or 40 depending on the size
# of the domain
length = max((latmax - latmin), (lonmax - lonmin)) / 5
if length <= 1:
dlatlon = 1
elif length <= 5:
dlatlon = 5
else:
dlatlon = np.round(length, decimals=-1)
if meridians is None:
meridians = np.r_[
np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
if parallels is None:
parallels = np.r_[np.arange(0, -90, -dlatlon)
[::-1], np.arange(0, 90, dlatlon)]
# Draw parallels / meridians
m.drawmeridians(meridians, labels=[0, 0, 0, 1], linewidth=.75, fontsize=10)
m.drawparallels(parallels, labels=[1, 0, 0, 1], linewidth=.75, fontsize=10)
# Set up the color scaling
cmap = plt.cm.rainbow
norm = mpl.colors.BoundaryNorm(np.arange(1, len(subregions) + 3), cmap.N)
# Process the subregions
for i, reg in enumerate(subregions):
if subregion_masks is not None and reg.name in subregion_masks.keys():
domain = (i + 1) * subregion_masks[reg.name]
else:
domain = (i + 1) * np.ones((2, 2))
nlats, nlons = domain.shape
domain = ma.masked_equal(domain, 0)
reglats = np.linspace(reg.lat_min, reg.lat_max, nlats)
reglons = np.linspace(reg.lon_min, reg.lon_max, nlons)
reglons, reglats = np.meshgrid(reglons, reglats)
# Convert to to projection coordinates. Not really necessary
# for cylindrical projections but keeping it here in case we need
# support for other projections.
x, y = m(reglons, reglats)
# Draw the subregion domain
m.pcolormesh(x, y, domain, cmap=cmap, norm=norm, alpha=.5)
# Label the subregion
xm, ym = x.mean(), y.mean()
m.plot(xm, ym, marker='$%s$' %
("R" + str(i + 1)), markersize=12, color='k')
# Add the title
ax.set_title(ptitle)
# Save the figure
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
def _get_colors(num_colors):
"""
matplotlib will recycle colors after a certain number. This can make
line type charts confusing as colors will be reused. This function
provides a distribution of colors across the default color map
to better approximate uniqueness.
:param num_colors: The number of unique colors to generate.
:return: A color map with num_colors.
"""
cmap = plt.get_cmap()
return [cmap(1. * i / num_colors) for i in range(num_colors)]
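# A tiny usage sketch for the helper above: sample twelve evenly spaced RGBA
# tuples from the current default colormap, e.g. for twelve distinct lines.
def _example_get_colors():
    colors = _get_colors(12)
    assert len(colors) == 12 and len(colors[0]) == 4
    return colors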
def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
xlabel='', ylabel='', ptitle='', subtitles=None,
label_month=False, yscale='linear', aspect=None,
cycle_colors=True, cmap=None):
''' Draw a time series plot.
:param results: 3D array of time series data.
:type results: :class:`numpy.ndarray`
:param times: List of Python datetime objects used by Matplotlib to handle
axis formatting.
:type times: :class:`list` of :class:`datetime.datetime`
:param labels: List of names for each data being plotted.
:type labels: :class:`list` of :mod:`string`
:param fname: Filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param gridshape: (Optional) tuple denoting the desired grid shape
(num_rows, num_cols) for arranging the subplots.
:type gridshape: :func:`tuple` of the form (num_rows, num_cols)
:param xlabel: (Optional) x-axis title.
:type xlabel: :mod:`string`
:param ylabel: (Optional) y-axis title.
:type ylabel: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param subtitles: (Optional) list of titles for each subplot.
:type subtitles: :class:`list` of :mod:`string`
:param label_month: (Optional) flag to toggle drawing month labels on the
x-axis.
:type label_month: :class:`bool`
:param yscale: (Optional) y-axis scale value, 'linear' for linear and 'log'
for log base 10.
:type yscale: :mod:`string`
:param aspect: (Optional) approximate aspect ratio of each subplot
(width / height). Default is 8.5 / 5.5
:type aspect: :class:`float`
:param cycle_colors: (Optional) flag to toggle whether to allow matlibplot
to re-use colors when plotting or force an evenly distributed range.
:type cycle_colors: :class:`bool`
:param cmap: (Optional) string or :class:`matplotlib.colors.LinearSegmentedColormap`
instance denoting the colormap. This must be able to be recognized by
`Matplotlib's get_cmap function <http://matplotlib.org/api/cm_api.html#matplotlib.cm.get_cmap>`_.
Maps like rainbow and spectral with wide spectrum of colors are nice choices when used with
the cycle_colors option. tab20, tab20b, and tab20c are good if the plot has less than 20 datasets.
:type cmap: :mod:`string` or :class:`matplotlib.colors.LinearSegmentedColormap`
'''
if cmap is not None:
set_cmap(cmap)
# Handle the single plot case.
if results.ndim == 2:
results = results.reshape(1, *results.shape)
# Make sure gridshape is compatible with input data
nplots = results.shape[0]
gridshape = _best_grid_shape(nplots, gridshape)
# Set up the figure
width, height = _fig_size(gridshape)
fig = plt.figure()
fig.set_size_inches((width, height))
fig.dpi = 300
# Make the subplot grid
grid = ImageGrid(fig, 111,
nrows_ncols=gridshape,
axes_pad=0.3,
share_all=True,
add_all=True,
ngrids=nplots,
label_mode='L',
aspect=False,
cbar_mode='single',
cbar_location='bottom',
cbar_size=.05,
cbar_pad=.20
)
# Make the plots
for i, ax in enumerate(grid):
data = results[i]
if not cycle_colors:
ax.set_prop_cycle('color', _get_colors(data.shape[0]))
if label_month:
xfmt = mpl.dates.DateFormatter('%b')
xloc = mpl.dates.MonthLocator()
ax.xaxis.set_major_formatter(xfmt)
ax.xaxis.set_major_locator(xloc)
# Set the y-axis scale
ax.set_yscale(yscale)
# Set up list of lines for legend
lines = []
ymin, ymax = 0, 0
# Plot each line
for tSeries in data:
line = ax.plot_date(times, tSeries, '')
lines.extend(line)
cmin, cmax = tSeries.min(), tSeries.max()
ymin = min(ymin, cmin)
ymax = max(ymax, cmax)
# Add a bit of padding so lines don't touch bottom and top of the plot
ymin = ymin - ((ymax - ymin) * 0.1)
ymax = ymax + ((ymax - ymin) * 0.1)
ax.set_ylim((ymin, ymax))
# Set the subplot title if desired
if subtitles is not None:
ax.set_title(subtitles[i], fontsize='small')
# Create a master axes rectangle for figure wide labels
fax = fig.add_subplot(111, frameon=False)
fax.tick_params(labelcolor='none', top='off',
bottom='off', left='off', right='off')
fax.set_ylabel(ylabel)
fax.set_title(ptitle, fontsize=16)
fax.title.set_y(1.04)
# Create the legend using a 'fake' colorbar axes. This lets us have a nice
# legend that is in sync with the subplot grid
cax = ax.cax
cax.set_frame_on(False)
cax.set_xticks([])
cax.set_yticks([])
cax.legend((lines), labels, loc='upper center', ncol=10, fontsize='small',
mode='expand', frameon=False)
# Note that due to weird behavior by axes_grid, it is more convenient to
# place the x-axis label relative to the colorbar axes instead of the
# master axes rectangle.
cax.set_title(xlabel, fontsize=12)
cax.title.set_y(-1.5)
# Rotate the x-axis tick labels
for ax in grid:
for xtick in ax.get_xticklabels():
xtick.set_ha('right')
xtick.set_rotation(30)
# Save the figure
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
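# A hedged usage sketch for draw_time_series: one subplot containing two
# monthly series. All values and labels below are invented for illustration.
def _example_time_series():
    import datetime
    times = [datetime.datetime(2000, month, 1) for month in range(1, 13)]
    results = np.random.RandomState(0).rand(1, 2, 12)
    draw_time_series(results, times, ['model', 'obs'], 'time_series_example',
                     ylabel='value', ptitle='Example time series',
                     label_month=True)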
def draw_barchart(results, yvalues, fname, ptitle='', fmt='png',
xlabel='', ylabel=''):
''' Draw a barchart.
:param results: 1D array of data.
:type results: :class:`numpy.ndarray`
:param yvalues: List of y-axis labels
:type yvalues: :class:`list` of :mod:`string`
:param fname: Filename of the plot.
:type fname: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param xlabel: (Optional) x-axis title.
:type xlabel: :mod:`string`
:param ylabel: (Optional) y-axis title.
:type ylabel: :mod:`string`
'''
y_pos = list(range(len(yvalues)))
fig = plt.figure()
fig.set_size_inches((11., 8.5))
fig.dpi = 300
ax = plt.subplot(111)
plt.barh(y_pos, results, align="center", height=0.8, linewidth=0)
plt.yticks(y_pos, yvalues)
plt.tick_params(axis="both", which="both", bottom="on", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ymin = min(y_pos)
ymax = max(y_pos)
ymin = min((ymin - ((ymax - ymin) * 0.1) / 2), 0.5)
ymax = ymax + ((ymax - ymin) * 0.1)
ax.set_ylim((ymin, ymax))
plt.xlabel(xlabel)
plt.tight_layout()
# Save the figure
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
def draw_marker_on_map(lat, lon, fname, fmt='png', location_name=' ', gridshape=(1, 1)):
'''Draw a marker on a map.
:param lat: Latitude for plotting a marker.
:type lat: :class:`float`
:param lon: Longitude for plotting a marker.
:type lon: :class:`float`
:param fname: The filename of the plot.
:type fname: :class:`string`
:param fmt: (Optional) Filetype for the output.
:type fmt: :class:`string`
:param location_name: (Optional) A label for the map marker.
:type location_name: :class:`string`
'''
fig = plt.figure()
fig.dpi = 300
ax = fig.add_subplot(111)
m = Basemap(projection='cyl', resolution='c', llcrnrlat=lat -
30, urcrnrlat=lat + 30, llcrnrlon=lon - 60, urcrnrlon=lon + 60)
m.drawcoastlines(linewidth=1)
m.drawcountries(linewidth=1)
m.drawmapboundary(fill_color='aqua')
m.fillcontinents(color='coral', lake_color='aqua')
m.ax = ax
xpt, ypt = m(lon, lat)
m.plot(xpt, ypt, 'bo') # plot a blue dot there
# put some text next to the dot, offset a little bit
# (the offset is in map projection coordinates)
plt.text(xpt + 0.5, ypt + 1.5, location_name +
'\n(lon: %5.1f, lat: %3.1f)' % (lon, lat))
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
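# A minimal usage sketch for draw_marker_on_map; the coordinates are
# approximate and chosen purely for illustration.
def _example_marker_on_map():
    draw_marker_on_map(34.15, -118.14, 'marker_example',
                       location_name='Pasadena')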
def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
clabel='', ptitle='', subtitles=None, cmap=None,
clevs=None, nlevs=10, parallels=None, meridians=None,
extend='neither', aspect=8.5 / 2.5):
''' Draw a multiple panel contour map plot.
:param dataset: 3D array of data to be plotted with shape (nT, nLat, nLon).
:type dataset: :class:`numpy.ndarray`
:param lats: Array of latitudes values.
:type lats: :class:`numpy.ndarray`
:param lons: Array of longitudes
:type lons: :class:`numpy.ndarray`
:param fname: The filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param gridshape: (Optional) tuple denoting the desired grid shape
(num_rows, num_cols) for arranging the subplots.
:type gridshape: :func:`tuple` of the form (num_rows, num_cols)
:param clabel: (Optional) colorbar title.
:type clabel: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param subtitles: (Optional) list of titles for each subplot.
:type subtitles: :class:`list` of :mod:`string`
:param cmap: (Optional) string or :class:`matplotlib.colors.LinearSegmentedColormap`
instance denoting the colormap. This must be able to be recognized by
`Matplotlib's get_cmap function <http://matplotlib.org/api/cm_api.html#matplotlib.cm.get_cmap>`_.
:type cmap: :mod:`string` or :class:`matplotlib.colors.LinearSegmentedColormap`
:param clevs: (Optional) contour levels values.
:type clevs: :class:`list` of :class:`int` or :class:`float`
:param nlevs: (Optional) target number of contour levels if clevs is None.
:type nlevs: :class:`int`
:param parallels: (Optional) list of ints or floats for the parallels to
be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
for additional information.
:type parallels: :class:`list` of :class:`int` or :class:`float`
:param meridians: (Optional) list of ints or floats for the meridians to
be drawn. See the `Basemap documentation <http://matplotlib.org/basemap/users/graticule.html>`_
for additional information.
:type meridians: :class:`list` of :class:`int` or :class:`float`
:param extend: (Optional) flag to toggle whether to place arrows at the colorbar
boundaries. Default is 'neither', but can also be 'min', 'max', or
'both'. Will be automatically set to 'both' if clevs is None.
:type extend: :mod:`string`
'''
# Handle the single plot case. Meridians and Parallels are not labeled for
# multiple plots to save space.
if dataset.ndim == 2 or (dataset.ndim == 3 and dataset.shape[0] == 1):
if dataset.ndim == 2:
dataset = dataset.reshape(1, *dataset.shape)
mlabels = [0, 0, 0, 1]
plabels = [1, 0, 0, 1]
else:
mlabels = [0, 0, 0, 0]
plabels = [0, 0, 0, 0]
# Make sure gridshape is compatible with input data
nplots = dataset.shape[0]
gridshape = _best_grid_shape(nplots, gridshape)
# Set up the figure
fig = plt.figure()
fig.set_size_inches((8.5, 11.))
fig.dpi = 300
# Make the subplot grid
grid = ImageGrid(fig, 111,
nrows_ncols=gridshape,
axes_pad=0.3,
share_all=True,
add_all=True,
ngrids=nplots,
label_mode='L',
cbar_mode='single',
cbar_location='bottom',
cbar_size=.15,
cbar_pad='0%'
)
# Determine the map boundaries and construct a Basemap object
lonmin = lons.min()
lonmax = lons.max()
latmin = lats.min()
latmax = lats.max()
m = Basemap(projection='cyl', llcrnrlat=latmin, urcrnrlat=latmax,
llcrnrlon=lonmin, urcrnrlon=lonmax, resolution='l')
# Convert lats and lons to projection coordinates
if lats.ndim == 1 and lons.ndim == 1:
lons, lats = np.meshgrid(lons, lats)
# Calculate contour levels if not given
if clevs is None:
# Cut off the tails of the distribution
# for more representative contour levels
clevs = _nice_intervals(dataset, nlevs)
extend = 'both'
cmap = plt.get_cmap(cmap)
# Create default meridians and parallels. The interval between
# them should be 1, 5, 10, 20, 30, or 40 depending on the size
# of the domain
length = max((latmax - latmin), (lonmax - lonmin)) / 5
if length <= 1:
dlatlon = 1
elif length <= 5:
dlatlon = 5
else:
dlatlon = np.round(length, decimals=-1)
if meridians is None:
meridians = np.r_[
np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
if parallels is None:
parallels = np.r_[np.arange(0, -90, -dlatlon)
[::-1], np.arange(0, 90, dlatlon)]
x, y = m(lons, lats)
for i, ax in enumerate(grid):
# Load the data to be plotted
data = dataset[i]
m.ax = ax
# Draw the borders for coastlines and countries
m.drawcoastlines(linewidth=1)
m.drawcountries(linewidth=.75)
# Draw parallels / meridians
m.drawmeridians(meridians, labels=mlabels, linewidth=.75, fontsize=10)
m.drawparallels(parallels, labels=plabels, linewidth=.75, fontsize=10)
# Draw filled contours
cs = m.contourf(x, y, data, cmap=cmap, levels=clevs, extend=extend)
# Add title
if subtitles is not None:
ax.set_title(subtitles[i], fontsize='small')
# Add colorbar
cbar = fig.colorbar(cs, cax=ax.cax, drawedges=True,
orientation='horizontal', extendfrac='auto')
cbar.set_label(clabel)
cbar.set_ticks(clevs)
cbar.ax.tick_params(labelsize=6)
cbar.ax.xaxis.set_ticks_position('none')
cbar.ax.yaxis.set_ticks_position('none')
# This is an ugly hack to make the title show up at the correct height.
# Basically save the figure once to achieve tight layout and calculate
# the adjusted heights of the axes, then draw the title slightly above
# that height and save the figure again
fig.savefig(TemporaryFile(), bbox_inches='tight', dpi=fig.dpi)
ymax = 0
for ax in grid:
bbox = ax.get_position()
ymax = max(ymax, bbox.ymax)
# Add figure title
fig.suptitle(ptitle, y=ymax + .06, fontsize=16)
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
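# A hedged usage sketch for draw_contour_map: a single random field on a
# regular lat/lon grid. Values, extents, and the filename are invented.
def _example_contour_map():
    lats = np.linspace(30, 50, 21)
    lons = np.linspace(-110, -80, 31)
    data = np.random.RandomState(0).rand(1, lats.size, lons.size)
    draw_contour_map(data, lats, lons, 'contour_example',
                     clabel='arbitrary units', ptitle='Example contour map')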
def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
gridshape=(1, 1), xlabel='', ylabel='', clabel='',
ptitle='', subtitles=None, cmap=None, clevs=None,
nlevs=10, extend='neither', aspect=None):
''' Draw a portrait diagram plot.
:param results: 3D array of the fields to be plotted. The second dimension
should correspond to the number of rows in the diagram and the
third should correspond to the number of columns.
:type results: :class:`numpy.ndarray`
:param rowlabels: Labels for each row.
:type rowlabels: :class:`list` of :mod:`string`
:param collabels: Labels for each column.
:type collabels: :class:`list` of :mod:`string`
:param fname: Filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param gridshape: (Optional) tuple denoting the desired grid shape
(num_rows, num_cols) for arranging the subplots.
:type gridshape: :func:`tuple` of the form (num_rows, num_cols)
:param xlabel: (Optional) x-axis title.
:type xlabel: :mod:`string`
:param ylabel: (Optional) y-axis title.
:type ylabel: :mod:`string`
:param clabel: (Optional) colorbar title.
:type clabel: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param subtitles: (Optional) list of titles for each subplot.
:type subtitles: :class:`list` of :mod:`string`
:param cmap: (Optional) string or :class:`matplotlib.colors.LinearSegmentedColormap`
instance denoting the colormap. This must be able to be recognized by
`Matplotlib's get_cmap function <http://matplotlib.org/api/cm_api.html#matplotlib.cm.get_cmap>`_.
:type cmap: :mod:`string` or :class:`matplotlib.colors.LinearSegmentedColormap`
:param clevs: (Optional) contour levels values.
:type clevs: :class:`list` of :class:`int` or :class:`float`
:param nlevs: Optional target number of contour levels if clevs is None.
:type nlevs: :class:`int`
:param extend: (Optional) flag to toggle whether to place arrows at the colorbar
boundaries. Default is 'neither', but can also be 'min', 'max', or
'both'. Will be automatically set to 'both' if clevs is None.
:type extend: :mod:`string`
:param aspect: (Optional) approximate aspect ratio of each subplot
(width / height). Default is 8.5 / 5.5
:type aspect: :class:`float`
'''
# Handle the single plot case.
if results.ndim == 2:
results = results.reshape(1, *results.shape)
nplots = results.shape[0]
# Make sure gridshape is compatible with input data
gridshape = _best_grid_shape(nplots, gridshape)
# Row and Column labels must be consistent with the shape of
# the input data too
prows, pcols = results.shape[1:]
if len(rowlabels) != prows or len(collabels) != pcols:
raise ValueError(
'rowlabels and collabels must have %d and %d elements respectively' % (prows, pcols))
# Set up the figure
width, height = _fig_size(gridshape)
fig = plt.figure()
fig.set_size_inches((width, height))
fig.dpi = 300
# Make the subplot grid
grid = ImageGrid(fig, 111,
nrows_ncols=gridshape,
axes_pad=0.4,
share_all=True,
aspect=False,
add_all=True,
ngrids=nplots,
label_mode='all',
cbar_mode='single',
cbar_location='bottom',
cbar_size=.15,
cbar_pad='3%'
)
# Calculate colorbar levels if not given
if clevs is None:
# Cut off the tails of the distribution
# for more representative colorbar levels
clevs = _nice_intervals(results, nlevs)
extend = 'both'
cmap = plt.get_cmap(cmap)
norm = mpl.colors.BoundaryNorm(clevs, cmap.N)
# Do the plotting
for i, ax in enumerate(grid):
data = results[i]
cs = ax.matshow(data, cmap=cmap, aspect='auto',
origin='lower', norm=norm)
# Add grid lines
ax.xaxis.set_ticks(np.arange(data.shape[1] + 1))
ax.yaxis.set_ticks(np.arange(data.shape[0] + 1))
x = (ax.xaxis.get_majorticklocs() - .5)
y = (ax.yaxis.get_majorticklocs() - .5)
ax.vlines(x, y.min(), y.max())
ax.hlines(y, x.min(), x.max())
# Configure ticks
ax.xaxis.tick_bottom()
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_xticklabels(collabels, fontsize='xx-small')
ax.set_yticklabels(rowlabels, fontsize='xx-small')
# Add axes title
if subtitles is not None:
ax.text(0.5, 1.04, subtitles[i], va='center', ha='center',
transform=ax.transAxes, fontsize='small')
# Create a master axes rectangle for figure wide labels
fax = fig.add_subplot(111, frameon=False)
fax.tick_params(labelcolor='none', top='off',
bottom='off', left='off', right='off')
fax.set_ylabel(ylabel)
fax.set_title(ptitle, fontsize=16)
fax.title.set_y(1.04)
# Add colorbar
cax = ax.cax
cbar = fig.colorbar(cs, cax=cax, norm=norm, boundaries=clevs, drawedges=True,
extend=extend, orientation='horizontal', extendfrac='auto')
cbar.set_label(clabel)
cbar.set_ticks(clevs)
cbar.ax.tick_params(labelsize=6)
cbar.ax.xaxis.set_ticks_position('none')
cbar.ax.yaxis.set_ticks_position('none')
# Note that due to weird behavior by axes_grid, it is more convenient to
# place the x-axis label relative to the colorbar axes instead of the
# master axes rectangle.
cax.set_title(xlabel, fontsize=12)
cax.title.set_y(1.5)
# Save the figure
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
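def _example_portrait_diagram():
    ''' Illustrative usage sketch for draw_portrait_diagram with synthetic
    data; the row/column labels and output filename below are placeholders.
    Draws two subplots, each a 4 (rows) x 3 (columns) metrics array. '''
    demo = np.random.randn(2, 4, 3)
    draw_portrait_diagram(demo,
                          rowlabels=['row 1', 'row 2', 'row 3', 'row 4'],
                          collabels=['col 1', 'col 2', 'col 3'],
                          fname='portrait_demo',
                          gridshape=(1, 2),
                          ptitle='Synthetic portrait diagram')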
class TaylorDiagram(object):
""" Taylor diagram helper class
Plot model standard deviation and correlation to reference (data)
sample in a single-quadrant polar plot, with r=stddev and
theta=arccos(correlation).
This class was released as public domain by the original author
Yannick Copin. You can find the original Gist where it was
released at: https://gist.github.com/ycopin/3342888
"""
def __init__(self, refstd, radmax=1.5, fig=None, rect=111, label='_'):
"""Set up Taylor diagram axes, i.e. single quadrant polar
plot, using mpl_toolkits.axisartist.floating_axes. refstd is
the reference standard deviation to be compared to.
"""
from matplotlib.projections import PolarAxes
import mpl_toolkits.axisartist.floating_axes as FA
import mpl_toolkits.axisartist.grid_finder as GF
self.refstd = refstd # Reference standard deviation
tr = PolarAxes.PolarTransform()
# Correlation labels
rlocs = np.concatenate((np.arange(10) / 10., [0.95, 0.99]))
tlocs = np.arccos(rlocs) # Conversion to polar angles
gl1 = GF.FixedLocator(tlocs) # Positions
tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))
# Standard deviation axis extent
self.smin = 0
self.smax = radmax * self.refstd
ghelper = FA.GridHelperCurveLinear(tr,
extremes=(0, np.pi / 2, # 1st quadrant
self.smin, self.smax),
grid_locator1=gl1,
tick_formatter1=tf1,
)
if fig is None:
fig = plt.figure()
ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)
fig.add_subplot(ax)
# Adjust axes
ax.axis["top"].set_axis_direction("bottom") # "Angle axis"
ax.axis["top"].toggle(ticklabels=True, label=True)
ax.axis["top"].major_ticklabels.set_axis_direction("top")
ax.axis["top"].label.set_axis_direction("top")
ax.axis["top"].label.set_text("Correlation")
ax.axis["left"].set_axis_direction("bottom") # "X axis"
ax.axis["left"].label.set_text("Standard deviation")
ax.axis["right"].set_axis_direction("top") # "Y axis"
ax.axis["right"].toggle(ticklabels=True)
ax.axis["right"].major_ticklabels.set_axis_direction("left")
ax.axis["bottom"].set_visible(False) # Useless
# Contours along standard deviations
ax.grid(False)
self._ax = ax # Graphical axes
self.ax = ax.get_aux_axes(tr) # Polar coordinates
# Add reference point and stddev contour
# print "Reference std:", self.refstd
l, = self.ax.plot([0], self.refstd, 'k*',
ls='', ms=10, label=label)
t = np.linspace(0, np.pi / 2)
r = np.zeros_like(t) + self.refstd
self.ax.plot(t, r, 'k--', label='_')
        # Collect sample points for later use (e.g. legend)
self.samplePoints = [l]
def add_sample(self, stddev, corrcoef, *args, **kwargs):
"""Add sample (stddev,corrcoeff) to the Taylor diagram. args
and kwargs are directly propagated to the Figure.plot
command."""
l, = self.ax.plot(np.arccos(corrcoef), stddev,
*args, **kwargs) # (theta,radius)
self.samplePoints.append(l)
return l
def add_rms_contours(self, levels=5, **kwargs):
"""Add constant centered RMS difference contours."""
rs, ts = np.meshgrid(np.linspace(self.smin, self.smax),
np.linspace(0, np.pi / 2))
# Compute centered RMS difference
rms = np.sqrt(self.refstd**2 + rs**2 - 2 *
self.refstd * rs * np.cos(ts))
contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
def add_stddev_contours(self, std, corr1, corr2, **kwargs):
"""Add a curved line with a radius of std between two points
[std, corr1] and [std, corr2]"""
t = np.linspace(np.arccos(corr1), np.arccos(corr2))
r = np.zeros_like(t) + std
return self.ax.plot(t, r, 'red', linewidth=2)
def add_contours(self, std1, corr1, std2, corr2, **kwargs):
"""Add a line between two points
[std1, corr1] and [std2, corr2]"""
t = np.linspace(np.arccos(corr1), np.arccos(corr2))
r = np.linspace(std1, std2)
return self.ax.plot(t, r, 'red', linewidth=2)
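def _example_taylor_diagram():
    ''' Illustrative usage sketch for TaylorDiagram: compare one synthetic
    model series against a synthetic reference series. '''
    ref = np.sin(np.linspace(0, 4 * np.pi, 100))
    model = ref + 0.2 * np.random.randn(100)
    # The reference standard deviation sets the radial scale of the diagram
    dia = TaylorDiagram(ref.std(ddof=1), fig=plt.figure(), label='Reference')
    corr = np.corrcoef(ref, model)[0, 1]
    dia.add_sample(model.std(ddof=1), corr, marker='o', ls='', label='Model')
    dia.add_rms_contours(levels=5, colors='0.5')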
def draw_histogram(dataset_array, data_names, fname, fmt='png', nbins=10):
'''
Purpose:: Draw a histogram for the input dataset.
:param dataset_array: A list of data values [data1, data2, ....].
:type dataset_array: :class:`list` of :class:`float`
:param data_names: A list of data names ['name1','name2',....].
:type data_names: :class:`list` of :class:`string`
:param fname: The filename of the plot.
:type fname: :class:`string`
:param fmt: (Optional) Filetype for the output.
:type fmt: :class:`string`
    :param nbins: (Optional) Number of bins.
    :type nbins: :class:`integer`
'''
fig = plt.figure()
fig.dpi = 300
ndata = len(dataset_array)
data_min = 500.
data_max = 0.
for data in dataset_array:
data_min = np.min([data_min, data.min()])
data_max = np.max([data_max, data.max()])
bins = np.linspace(np.round(data_min), np.round(data_max + 1), nbins)
for idata, data in enumerate(dataset_array):
ax = fig.add_subplot(ndata, 1, idata + 1)
ax.hist(data, bins, alpha=0.5, label=data_names[idata], normed=True)
leg = ax.legend()
leg.get_frame().set_alpha(0.5)
ax.set_xlim([data_min - (data_max - data_min) * 0.15,
data_max + (data_max - data_min) * 0.15])
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
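def _example_histogram():
    ''' Illustrative usage sketch for draw_histogram with two synthetic
    samples; the output filename is a placeholder. '''
    sample_a = np.random.normal(10., 2., 500)
    sample_b = np.random.normal(12., 3., 500)
    draw_histogram([sample_a, sample_b], ['sample A', 'sample B'],
                   'histogram_demo', nbins=20)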
def fill_US_states_with_color(regions, fname, fmt='png', ptitle='',
colors=False, values=None, region_names=None):
''' Fill the States over the contiguous US with colors
:param regions: The list of subregions(lists of US States) to be filled
with different colors.
:type regions: :class:`list`
:param fname: The filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param colors: (Optional) : If True, each region will be filled
with different colors without using values
:type colors: :class:`bool`
:param values: (Optional) : If colors==False, color for each region scales
an associated element in values
:type values: :class:`numpy.ndarray`
'''
nregion = len(regions)
if colors:
cmap = plt.cm.rainbow
if not (values is None):
cmap = plt.cm.seismic
max_abs = np.abs(values).max()
# Set up the figure
fig = plt.figure()
fig.set_size_inches((8.5, 11.))
fig.dpi = 300
ax = fig.add_subplot(111)
# create the map
m = Basemap(llcrnrlon=-127,llcrnrlat=22,urcrnrlon=-65,urcrnrlat=52,
ax=ax)
for iregion, region in enumerate(regions):
shapes = utils.shapefile_boundary('us_states', region)
patches=[]
lats=np.empty(0)
lons=np.empty(0)
for shape in shapes:
patches.append(Polygon(np.array(shape), True))
lons = np.append(lons, shape[:,0])
lats = np.append(lats, shape[:,1])
if colors:
color_to_fill=cmap((iregion+0.5)/nregion)
if not (values is None):
value = values[iregion]
color_to_fill = cmap(0.5+np.sign(value)*abs(value)/max_abs*0.45)
ax.add_collection(PatchCollection(patches, facecolor=color_to_fill))
if region_names:
ax.text(lons.mean(), lats.mean(), region_names[iregion],
ha='center', va='center', fontsize=10)
m.drawcountries(linewidth=0.)
# Add the title
ax.set_title(ptitle)
# Save the figure
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
fig.clf()
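def _example_fill_us_states():
    ''' Illustrative usage sketch for fill_US_states_with_color. Assumes the
    'us_states' shapefile used by utils.shapefile_boundary is available; the
    state groupings, region names and output filename are placeholders. '''
    regions = [['California', 'Oregon', 'Washington'],
               ['Arizona', 'New Mexico', 'Texas']]
    fill_US_states_with_color(regions, 'us_regions_demo', colors=True,
                              ptitle='Synthetic region grouping',
                              region_names=['West coast', 'Southwest'])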
def draw_plot_to_compare_trends(obs_data, ens_data, model_data,
fname, fmt='png', ptitle='', data_labels=None,
xlabel='', ylabel=''):
    ''' Draw a plot comparing observed, multi-model ensemble, and individual model trends for each region
:param obs_data: An array of observed trend and standard errors for regions
:type obs_data: :class:'numpy.ndarray'
:param ens_data: An array of trend and standard errors from a multi-model ensemble for regions
:type ens_data: : class:'numpy.ndarray'
:param model_data: An array of trends from models for regions
:type model_data: : class:'numpy.ndarray'
:param fname: The filename of the plot.
:type fname: :mod:`string`
:param fmt: (Optional) filetype for the output.
:type fmt: :mod:`string`
:param ptitle: (Optional) plot title.
:type ptitle: :mod:`string`
:param data_labels: (Optional) names of the regions
:type data_labels: :mod:`list`
:param xlabel: (Optional) a label for x-axis
:type xlabel: :mod:`string`
:param ylabel: (Optional) a label for y-axis
:type ylabel: :mod:`string`
'''
nregions = obs_data.shape[1]
# Set up the figure
fig = plt.figure()
fig.set_size_inches((8.5, 11.))
fig.dpi = 300
ax = fig.add_subplot(111)
b_plot = ax.boxplot(model_data, widths=np.repeat(0.2, nregions), positions=np.arange(nregions)+1.3)
plt.setp(b_plot['medians'], color='black')
plt.setp(b_plot['whiskers'], color='black')
plt.setp(b_plot['boxes'], color='black')
plt.setp(b_plot['fliers'], color='black')
ax.errorbar(np.arange(nregions)+0.8, obs_data[0,:], yerr=obs_data[1,:],
fmt='o', color='r', ecolor='r')
ax.errorbar(np.arange(nregions)+1., ens_data[0,:], yerr=ens_data[1,:],
fmt='o', color='b', ecolor='b')
ax.set_xticks(np.arange(nregions)+1)
ax.set_xlim([0, nregions+1])
if data_labels:
ax.set_xticklabels(data_labels)
fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight')
def draw_precipitation_JPDF (plot_data, plot_level, x_ticks, x_names,y_ticks,y_names,
output_file, title=None, diff_plot=False, cmap = cm.BrBG,
cbar_ticks=[0.01, 0.10, 0.5, 2, 5, 25],
cbar_label=['0.01', '0.10', '0.5', '2', '5', '25']):
'''
:param plot_data: a numpy array of data to plot (dimY, dimX)
:type plot_data: :class:'numpy.ndarray'
:param plot_level: levels to plot
:type plot_level: :class:'numpy.ndarray'
    :param x_ticks: x values where tick marks are located
:type x_ticks: :class:'numpy.ndarray'
:param x_names: labels for the ticks on x-axis (dimX)
:type x_names: :class:'list'
    :param y_ticks: y values where tick marks are located
:type y_ticks: :class:'numpy.ndarray'
:param y_names: labels for the ticks on y-axis (dimY)
:type y_names: :class:'list'
:param output_file: name of output png file
:type output_file: :mod:'string'
:param title: (Optional) title of the plot
:type title: :mod:'string'
:param diff_plot: (Optional) if true, a difference plot will be generated
:type diff_plot: :mod:'bool'
:param cbar_ticks: (Optional) tick marks for the color bar
:type cbar_ticks: :class:'list'
    :param cbar_label: (Optional) labels for the tick marks of the color bar
:type cbar_label: :class:'list'
'''
if diff_plot:
cmap = cm.RdBu_r
fig = plt.figure()
sb = fig.add_subplot(111)
dimY, dimX = plot_data.shape
plot_data2 = np.zeros([dimY,dimX]) # sectioned array for plotting
# fontsize
rcParams['axes.labelsize'] = 8
rcParams['xtick.labelsize'] = 8
rcParams['ytick.labelsize'] = 8
# assign the values in plot_level to plot_data
for iy in range(dimY):
for ix in range(dimX):
if plot_data[iy,ix] <= np.min(plot_level):
plot_data2[iy,ix] = -1.
else:
plot_data2[iy,ix] = plot_level[np.where(plot_level <= plot_data[iy,ix])].max()
sb.set_xticks(x_ticks)
sb.set_xticklabels(x_names)
sb.set_yticks(y_ticks)
sb.set_yticklabels(y_names)
norm = BoundaryNorm(plot_level[1:], cmap.N)
a=sb.pcolor(plot_data2, edgecolors = 'k', linewidths=0.5, cmap = cmap, norm = norm)
a.cmap.set_under('w')
sb.set_xlabel('Spell duration [hrs]')
sb.set_ylabel('Peak rainfall [mm/hr]')
cax = fig.add_axes([0.2, -0.06, 0.6, 0.02])
cbar = plt.colorbar(a, cax=cax, orientation = 'horizontal', extend='both')
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_label)
if title:
fig.suptitle(title)
fig.savefig(output_file, dpi=600,bbox_inches='tight')
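def _example_precipitation_jpdf():
    ''' Illustrative usage sketch for draw_precipitation_JPDF using a random
    joint histogram; the tick values and output filename are placeholders. '''
    jpdf = np.random.lognormal(mean=-1., sigma=1., size=(10, 12))
    levels = np.array([0., 0.01, 0.1, 0.5, 2., 5., 25.])
    draw_precipitation_JPDF(jpdf, levels,
                            x_ticks=np.arange(0, 12, 3),
                            x_names=['0', '3', '6', '9'],
                            y_ticks=np.arange(0, 10, 2),
                            y_names=['0', '2', '4', '6', '8'],
                            output_file='jpdf_demo.png')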
| apache-2.0 |
raghavrv/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 4 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
# #############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
# #############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
RobinD42/pyside | doc/inheritance_diagram.py | 10 | 12497 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
      .. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:copyright: Copyright 2010-2011 by the PySide team.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False, parts=0):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins, parts)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = class_sig_re.match(name).groups()
except (AttributeError, ValueError):
raise InheritanceException('Invalid class or module %r specified '
'for inheritance diagram' % name)
fullname = (path or '') + base
path = (path and path.rstrip('.') or '')
# two possibilities: either it is a module, then import it
try:
__import__(fullname)
todoc = sys.modules[fullname]
except ImportError:
# else it is a class, then import the module
if not path:
if currmodule:
# try the current module
path = currmodule
else:
raise InheritanceException(
'Could not import class %r specified for '
'inheritance diagram' % base)
try:
__import__(path)
todoc = getattr(sys.modules[path], base)
except (ImportError, AttributeError):
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % (path + '.' + base))
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
def _import_classes(self, class_names, currmodule):
"""Import a list of classes."""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, parts):
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
"""
all_classes = {}
builtins = __builtins__.values()
def recurse(cls):
if not show_builtins and cls in builtins:
return
nodename = self.class_name(cls, parts)
fullname = self.class_name(cls, 0)
baselist = []
all_classes[cls] = (nodename, fullname, baselist)
for base in cls.__bases__:
if not show_builtins and base in builtins:
continue
if base.__name__ == "Object" and base.__module__ == "Shiboken":
continue
baselist.append(self.class_name(base, parts))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return all_classes.values()
def class_name(self, cls, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [fullname for (_, fullname, _) in self.class_info]
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': 'Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs):
return ','.join(['%s=%s' % x for x in attrs.items()])
def _format_graph_attrs(self, attrs):
return ''.join(['%s=%s;\n' % x for x in attrs.items()])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = []
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
for name, fullname, bases in self.class_info:
# Write the node
this_node_attrs = n_attrs.copy()
url = urls.get(fullname)
if url is not None:
this_node_attrs['URL'] = '"%s"' % url
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
res.append('}\n')
return ''.join(res)
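def _example_generate_dot():
    """
    Illustrative sketch, independent of Sphinx: build the dot source for a
    small standard-library class and print it. The graph name is arbitrary.
    """
    graph = InheritanceGraph(['collections.OrderedDict'], None, parts=1)
    print(graph.generate_dot('example_diagram'))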
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': directives.nonnegative_int,
}
def run(self):
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.temp_data.get('py:module'),
parts=node['parts'])
except InheritanceException, err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'])
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def skip(self, node):
raise nodes.SkipNode
def setup(app):
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False),
app.add_config_value('inheritance_node_attrs', {}, False),
app.add_config_value('inheritance_edge_attrs', {}, False),
| lgpl-2.1 |
neuroidss/nupic.research | projects/sequence_classification/run_encoder_only.py | 11 | 7720 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run sequence classification experiment with simple encoder model
1. Encode each element with RDSE encoder
2. Calculate prediction using kNN based on average overlap distance
3. Search for the optimal encoder resolution
"""
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
def runEncoderOverDataset(encoder, dataset):
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
return activeColumnsData
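def exampleEncodeSequence():
  """
  Illustrative sketch with synthetic data: encode one short scalar sequence
  and inspect the active columns of its first element. The encoder
  parameters below are arbitrary example values.
  """
  import numpy as np
  from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
  data = np.sin(np.linspace(0, 2 * np.pi, 20)).reshape(1, 20)
  encoder = RandomDistributedScalarEncoder(0.1, w=21, n=1024)
  activeColumns = runEncoderOverDataset(encoder, data)
  print "Active columns of the first element:", sorted(activeColumns[0][0])[:10]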
def calcualteEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def searchForOptimalEncoderResolution(nBucketList, trainData, trainLabel, numCols, w):
numCPU = multiprocessing.cpu_count()
numWorker = numCPU
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
resultQueue = multiprocessing.Queue()
for nBuckets in nBucketList:
taskQueue.put({"nBuckets": nBuckets})
for _ in range(numWorker):
taskQueue.put(None)
jobs = []
for i in range(numWorker):
print "Start process ", i
p = multiprocessing.Process(target=calcualteEncoderModelWorker,
args=(taskQueue, resultQueue, numCols, w, trainData, trainLabel))
jobs.append(p)
p.daemon = True
p.start()
while not taskQueue.empty():
time.sleep(0.1)
accuracyVsResolution = np.zeros((len(nBucketList,)))
while not resultQueue.empty():
exptResult = resultQueue.get()
nBuckets = exptResult.keys()[0]
accuracyVsResolution[nBucketList.index(nBuckets)] = exptResult[nBuckets]
return accuracyVsResolution
if __name__ == "__main__":
# datasetName = "SyntheticData"
# dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
for dataName in dataSetList:
trainData, trainLabel, testData, testLabel = loadDataset(dataName, datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel)
if max(numTrain, numTest) * sequenceLength > 600 * 600:
print "skip this large dataset for now"
continue
print
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
continue
except:
print "Search encoder parameters for this dataset"
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = []
for i in range(testData.shape[0]):
predictedClass = one_nearest_neighbor(trainData, trainLabel, testData[i,:])
correct = 1 if predictedClass == testLabel[i] else 0
outcomeEuclidean.append(correct)
# print "{} out of {} done outcome: {}".format(i, testData.shape[0], correct)
print
print "Euclidean model accuracy: {}".format(np.mean(outcomeEuclidean))
print
accuracyEuclideanDist = np.mean(outcomeEuclidean)
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
optimalResolution = searchResolution['optimalResolution']
except:
nBucketList = range(20, 200, 10)
accuracyVsResolution = searchForOptimalEncoderResolution(
nBucketList, trainData, trainLabel, numCols, w)
optNumBucket = nBucketList[np.argmax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue)/optNumBucket
searchResolution = {
'nBucketList': nBucketList,
'accuracyVsResolution': accuracyVsResolution,
'optimalResolution': optimalResolution
}
# save optimal resolution for future use
outputFile = open('results/optimalEncoderResolution/{}'.format(dataName), 'w')
pickle.dump(searchResolution, outputFile)
outputFile.close()
print "optimal bucket # {}".format((maxValue - minValue)/optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
distMatColumnTest = calculateDistanceMat(activeColumnsTest, activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTest, trainLabel, testLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
print
print "Column Only model, Accuracy: {}".format(accuracyColumnOnly)
expResults = {'accuracyEuclideanDist': accuracyEuclideanDist,
'accuracyColumnOnly': accuracyColumnOnly,
'EuclideanDistanceMat': EuclideanDistanceMat,
'distMatColumnTest': distMatColumnTest}
outputFile = open('results/modelPerformance/{}_columnOnly'.format(dataName), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
| agpl-3.0 |
ryanraaum/african-mtdna | popdata_sources/beleza2005/process.py | 1 | 1365 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import sys
import csv
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region('16024-16569;1-340')
hids = []
hvr1 = []
hvr2 = []
sites = []
with open('beleza2005.csv', 'rU') as f:
reader = csv.reader(f)
reader.next() # skip past header
for row in reader:
for i in range(int(row[1])):
hids.append(row[0])
hvr1.append(str2sites(row[2], add16k=True))
hvr2.append(str2sites(row[3]))
for i in range(len(hids)):
sites.append(hvr1[i] + hvr2[i])
## Validate variant sites
passed_validation = True
for i in range(len(hids)):
seq = sites2seq(sites[i], region)
mysites = seq2sites(seq)
if not sites[i] == mysites:
if not translate(seq, None, '-') == translate(sites2seq(mysites, region), None, '-'):
passed_validation = False
print i
if passed_validation:
counter = 0
prefix = metadata.ix[0,'NewPrefix']
with open('processed.csv', 'w') as f:
for i in range(len(hids)):
counter = counter + 1
newid = prefix + str(counter).zfill(3)
seq = sites2seq(sites[i], region)
seq = translate(seq, None, '-')
mysites = seq2sites(seq)
mysites = ' '.join([str(x) for x in mysites])
f.write('%s,%s,%s\n' % (newid, hids[i], mysites)) | cc0-1.0 |
moorepants/BicycleID | bicycleid/plot.py | 1 | 8746 | import numpy as np
from matplotlib import rc
import matplotlib.figure as mpfig
import matplotlib.backends.backend_gtk as mpgtk
from dtk import control
import bicycleparameters as bp
class CoefficientPlot(object):
equations = [r'\dot{\phi}', r'\dot{\delta}', r'\ddot{\phi}', r'\ddot{\delta}']
states = [r'\phi', r'\delta', r'\dot{\phi}', r'\dot{\delta}']
xlabel = r'$v$ $\frac{m}{s}$'
xlim = (1.0, 10.0)
ylim = np.array([[-10., 30.],
[-60., 0.],
[-4., 2.],
[-3., 3.],
[-0.5, 0.5],
[-50., 300.],
[-175., 40.],
[-50., 50.],
[-40., 20.],
[-5., 15.]])
riderNames = ['Charlie', 'Jason', 'Luke']
def __init__(self):
rc('figure.subplot', wspace=0.4, hspace=0.4)
self.figure = mpfig.Figure(figsize=(6, 4), dpi=60)
self.axes = {}
self.title = self.figure.suptitle('')
for i in range(2, 4):
for j in range(4):
label = 'a' + str(i + 1) + str(j + 1)
ax = self.figure.add_subplot(2, 5, 5 * (i - 2) + j + 1)
self.axes[label] = ax
ax.set_title('$a_{' + self.equations[i] + self.states[j] + '}$')
ax.set_xlabel(self.xlabel)
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim[5 * (i - 2) + j])
for i, p in zip(range(2, 4), [5, 10]):
label = 'b' + str(i + 1) + str(1)
ax = self.figure.add_subplot(2, 5, p)
self.axes[label] = ax
ax.set_title('$b_{' + self.equations[i] + r'T_\delta' + '}$')
ax.set_xlabel(self.xlabel)
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim[p - 1])
self.lines = {}
for label, ax in self.axes.items():
self.lines[label + '-exp'] = ax.plot(self.xlim, [1., 1.], '.',
markersize=2)[0]
for rider in self.riderNames:
self.lines[label + '-mod-' + rider] = ax.plot(self.xlim, [1., 1.])[0]
self.canvas = mpgtk.FigureCanvasGTK(self.figure)
self.canvas.show()
def update_graph(self, exp, mod):
"""Sets the data in the plot with respect to the provided experimental
and model data sets.
Parameters
----------
exp : pandas.DataFrame
A data frame containing the experimental data.
mod : dictionary
A dictionary of pandas.DataFrame objects containing the data for
each rider.
"""
self.title.set_text('Number of experiments: {}'.format(len(exp)))
for name, line in self.lines.items():
try:
label, typ, rider = name.split('-')
except ValueError:
label, typ = name.split('-')
if typ == 'exp':
line.set_data(exp['ActualSpeed'], exp[label])
elif typ == 'mod':
try:
line.set_data(mod[rider]['Speed'], mod[rider][label])
except KeyError:
line.set_data([np.nan], [np.nan])
class BodePlot(object):
inputNames = [r'$T_\delta$']
outputNames = [r'$\phi$', r'$\delta$']
stateNames = [r'$\phi$', r'$\delta$', r'$\dot{\phi}$', r'$\dot{\delta}$']
systemNames = ['Experimental Mean', 'Experimental Upper Uncertainty',
'Experimental Lower Uncertainty', 'Charlie', 'Jason', 'Luke']
colors = ['b', 'b', 'b', 'r', 'g', 'm']
linestyles = ['-', '--', '--', '-', '-', '-']
def __init__(self, w):
A = np.array([[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ],
[ 8.24494689, -3.53782098, -0.06418077, -0.53855055],
[ 18.84406429, 31.38819183, 3.50835159, -7.18282895]])
B = np.array([[ 0. ],
[ 0. ],
[-0.1018712 ],
[ 5.56864583]])
C = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.]])
D = np.zeros((2, 1))
self.systems = []
for name in self.systemNames:
self.systems.append(control.StateSpace(A, B, C, D, name=name,
inputNames=self.inputNames, outputNames=self.outputNames,
stateNames=self.stateNames))
self.w = w
self.bode = control.Bode(w, *self.systems, colors=self.colors,
linestyles=self.linestyles)
self.bode.mag_phase()
self.bode.plot()
self.canvases = []
for fig in self.bode.figs:
canvas = mpgtk.FigureCanvasGTK(fig)
self.canvases.append(canvas)
canvas.show()
def update_graph(self, bodeData, models):
"""Updates the Bode plot based on the provided data.
Parameters
----------
bodeData : tuple
The mean and standard deviation of the magnitude and phase and the
mean speed for the set of runs.
models : dictionary
A dictionary of models for each rider.
"""
meanMag, stdMag, meanPhase, stdPhase, meanSpeed, stdSpeed = bodeData
meanMagPlus = meanMag + stdMag
meanMagMinus = meanMag - stdMag
# steer torque to roll angle
phiPlot = self.bode.figs[0]
phiPlot.magAx.lines[0].set_ydata(meanMag[:, 0, 0])
phiPlot.magAx.lines[1].set_ydata(meanMagPlus[:, 0, 0])
phiPlot.magAx.lines[2].set_ydata(meanMagMinus[:, 0, 0])
phiPlot.magAx.set_ylim((-100, 50))
phiPlot.phaseAx.lines[0].set_ydata(meanPhase[:, 0, 0])
phiPlot.phaseAx.lines[1].set_ydata(meanPhase[:, 0, 0] + stdPhase[:, 0, 0])
phiPlot.phaseAx.lines[2].set_ydata(meanPhase[:, 0, 0] - stdPhase[:, 0, 0])
phiPlot.phaseAx.set_ylim((-360, 0))
# steer torque to steer angle
deltaPlot = self.bode.figs[1]
deltaPlot.magAx.lines[0].set_ydata(meanMag[:, 1, 0])
deltaPlot.magAx.lines[1].set_ydata(meanMagPlus[:, 1, 0])
deltaPlot.magAx.lines[2].set_ydata(meanMagMinus[:, 1, 0])
deltaPlot.magAx.set_ylim((-100, 50))
deltaPlot.phaseAx.lines[0].set_ydata(meanPhase[:, 1, 0])
deltaPlot.phaseAx.lines[1].set_ydata(meanPhase[:, 1, 0] + stdPhase[:, 1, 0])
deltaPlot.phaseAx.lines[2].set_ydata(meanPhase[:, 1, 0] - stdPhase[:, 1, 0])
deltaPlot.phaseAx.set_ylim((-360, 0))
for rider in ['Charlie', 'Jason', 'Luke']:
try:
mod = models[rider]
except KeyError:
# if the rider isn't there, don't plot the lines
lenW = len(deltaPlot.magAx.lines[0].get_xdata())
mag = np.nan * np.ones((lenW, 2))
phase = np.nan * np.ones((lenW, 2))
else:
mag, phase = mod.magnitude_phase(meanSpeed, self.w)
mag = 20. * np.log10(mag)
phase = np.rad2deg(phase)
for i, p in enumerate(phase.T):
if p[0] > 0.:
phase[:, i] = phase[:, i] - 360.
phiPlot.magAx.lines[self.systemNames.index(rider)].set_ydata(mag[:, 0])
phiPlot.phaseAx.lines[self.systemNames.index(rider)].set_ydata(phase[:, 0])
deltaPlot.magAx.lines[self.systemNames.index(rider)].set_ydata(mag[:, 1])
deltaPlot.phaseAx.lines[self.systemNames.index(rider)].set_ydata(phase[:, 1])
class RootLociPlot(object):
def __init__(self, models, expSpeed, eig, speed):
# the plot should have eigenvalues for each rider in a different color
# and plot dots for the eigenvalues from the model
self.fig = bp.plot_eigenvalues([mod.bicycle for mod in models.values()], speed)
# plot eigenvalues of the experimental models
self.ax = self.fig.axes[0]
self.ax.set_ylim((-10., 10.))
#self.real = self.ax.plot(expSpeed, np.real(eig), '.k')
self.real = []
for eigs in eig.T:
eigWithImag = abs(np.imag(eigs)) > 1e-10
self.real.append(self.ax.scatter(expSpeed, np.real(eigs),
c=eigWithImag))
self.imag = self.ax.plot(expSpeed, abs(np.imag(eig)), 'ob')
self.canvas = mpgtk.FigureCanvasGTK(self.fig)
self.canvas.show()
def update_plot(self, speed, eig):
for i, line in enumerate(self.real):
xy = np.vstack((speed, np.real(eig[:, i]))).T
line.set_offsets(xy)
for i, line in enumerate(self.imag):
line.set_data(speed, abs(np.imag(eig[:, i])))
| bsd-2-clause |
luo66/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
project-asap/IReS-Platform | asap-tools/monitoring/lib/tools.py | 1 | 2356 | __author__ = 'cmantas'
import matplotlib.pyplot as plt
from ast import literal_eval
from time import time, sleep
from json import load, dumps
from os.path import isfile
try:
plt.style.use('fivethirtyeight')
except:
# print "You could get prettier graphs with matplotlib > 1.4"
pass
from matplotlib.pyplot import figure, show
def myplot(*args, **kwargs):
if "title" in kwargs:
title = kwargs["title"]
del(kwargs["title"])
plt.title(title)
if "xlabel" in kwargs:
xlabel = kwargs["xlabel"]
del(kwargs["xlabel"])
plt.xlabel(xlabel)
if "ylabel" in kwargs:
ylabel = kwargs["ylabel"]
del(kwargs["ylabel"])
plt.ylabel(ylabel)
plt.grid(True)
# plt.grid(which='both')
# plt.grid(which='minor', alpha=0.2)
plt.plot(*args, **kwargs)
plt.legend(loc = 'upper left')
def mycast(a):
"""
    given a string, return its value cast to the correct type, or the string itself if it cannot be evaluated
if the input is a list or a dict it recursively calls itself on the input collection's (keys and) values
:param a: the input string
:return: the evaluated 'casted' result
"""
if isinstance(a, dict):
return dict(map(lambda (k, v): (mycast(k),mycast(v)), a.iteritems()))
elif isinstance(a, list):
return map(mycast, a)
else:
try:
return literal_eval(a)
except:
return a
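def example_mycast():
    """ Illustrative sketch of mycast on a nested input (made-up values) """
    mixed = {'count': '3', 'ratio': '0.75', 'tags': "['a', 'b']", 'name': 'cmantas'}
    # strings that parse as literals are cast; everything else is left as-is
    print mycast(mixed)
    # -> {'count': 3, 'ratio': 0.75, 'tags': ['a', 'b'], 'name': 'cmantas'}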
def wait_for_file(filepath, timeout):
""" Keep waiting for a file to appear unless a timeout is reached
:param filepath:
    :param timeout: number of seconds to wait before giving up
:return: void
"""
end_time= time() + timeout
#wait
while not isfile(filepath) and time()<end_time:
sleep(0.2)
# if after wait no file then trouble
if not isfile(filepath):
print "ERROR: waited for monitoring data file, but timed out"
exit()
def _collect_json(metrics_file, timeout=3):
try:
# wait for the metrics file to be created (timeout secs)
if timeout: wait_for_file(metrics_file, timeout)
# collect the saved metrics from metrics file
with open(metrics_file) as f:
metrics = load(f)
return metrics
except:
#print 'Could not collect the metrics'
return {}
| apache-2.0 |
arabenjamin/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are select for all time
point. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/tests/test_streamplot.py | 9 | 2067 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.transforms as mtransforms
def velocity_field():
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
return X, Y, U, V
@image_comparison(baseline_images=['streamplot_colormap_test_image'])
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
cmap=plt.cm.autumn)
plt.colorbar()
@image_comparison(baseline_images=['streamplot_linewidth_test_image'])
def test_linewidth():
X, Y, U, V = velocity_field()
speed = np.sqrt(U*U + V*V)
lw = 5*speed/speed.max()
df = 25. / 30. # Compatibility factor for old test image
plt.streamplot(X, Y, U, V, density=[0.5 * df, 1. * df], color='k',
linewidth=lw)
@image_comparison(baseline_images=['streamplot_masks_and_nans_test_image'])
def test_masks_and_nans():
X, Y, U, V = velocity_field()
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U = np.ma.array(U, mask=mask)
U[:20, :20] = np.nan
with np.errstate(invalid='ignore'):
plt.streamplot(X, Y, U, V, color=U, cmap=plt.cm.Blues)
@cleanup
def test_streamplot_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
if __name__=='__main__':
import nose
nose.runmodule()
| lgpl-3.0 |
drewUCL/Greengraph | Greengraph/test/test_graph.py | 1 | 3209 | from nose.tools import assert_almost_equal, assert_in, assert_equal, assert_false, assert_true
from ..graph import Greengraph
from ..map import Map
from matplotlib import image as img #change
import os
import yaml
import numpy
import geopy
import requests
import mock
@mock.patch.object(geopy.geocoders, 'GoogleV3')
def test_Greengraph(mock_geocoders):
'''
Description: A function to test the initialisation of Greengraph
Data Source: Within Function
'''
userInput = Greengraph('London','Glasgow')
	assert_equal(userInput.start, 'London')
	assert_equal(userInput.end, 'Glasgow')
@mock.patch.object(geopy.geocoders.GoogleV3, 'geocode')
def test_geolocate(mock_geocode):
'''
Description: A function to test multiple locations and analyse the return of geolocate
Data Source: YAML
'''
tObject = Greengraph(0.0,0.0) #initiate the Greengraph object to access geolocate
with open(os.path.join(os.path.dirname(__file__),'fixtures','graph_test_data.yaml')) as data:
test_data = yaml.load(data)['test_geolocate']
for point in test_data:
city = point.pop('city')
latitude = point.pop('latitude')
longitude = point.pop('longitude')
mock_geocode.assert_equal(tObject.geolocate(city),(latitude,longitude))
def test_location_sequence():
'''
Description: A function to test the order of co-ordinates used to generate a path that will allow the image to be generated and then decomposed to analyse the green pixel density.
Data Source: YAML
'''
tObject = Greengraph(0.0,0.0) #initiate the Greengraph object to access geolocate
with open(os.path.join(os.path.dirname(__file__),'fixtures','graph_test_data.yaml')) as data:
test_data = yaml.load(data)['test_location_sequence']
for point in test_data:
tFrom = point.pop('from_point')
tTo = point.pop('to_point')
tSteps = point.pop('steps')
tLocationMatrix = point.pop('location_matrix')
tResult = tObject.location_sequence(tObject.geolocate(tFrom),tObject.geolocate(tTo),tSteps)
for row in range(0,len(tResult)):
for element in range(0,len(tResult[row])):
assert_almost_equal(tResult[row][element],tLocationMatrix[row][element])
@mock.patch.object(img, 'imread')
@mock.patch.object(requests, 'get')
@mock.patch.object(Greengraph, 'geolocate')
@mock.patch.object(Map, 'count_green')
def test_green_between(mock_count_green, mock_geolocate, mock_get, mock_imread):
'''
	Description: A function to test green_between with geolocate, count_green, requests.get and imread mocked out
Data Source: YAML
'''
with open(os.path.join(os.path.dirname(__file__),'fixtures','graph_test_data.yaml')) as data:
test_data = yaml.load(data)['test_green_between']
for point in test_data:
tFrom = point.pop('from_point')
tFromCoord = point.pop('from_locations')
tTo = point.pop('to_point')
tToCoord = point.pop('to_location')
tSteps = point.pop('steps')
tReturnVector = point.pop('green_vector')
			#Mock geolocate and count_green as we only want to test an isolated event of green_between
			mock_geolocate.side_effect = [tFromCoord, tToCoord]
			mock_count_green.side_effect = tReturnVector
#Get test pixel result
gPixel = Greengraph(tFrom,tTo).green_between(tSteps)
assert_equal(tReturnVector,gPixel)
| mit |
Lawrence-Liu/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
IssamLaradji/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 34 | 2573 | import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from .test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
| bsd-3-clause |
charanpald/wallhack | wallhack/modelselect/ProcessResults.py | 1 | 20493 | import numpy
import logging
import sys
import scipy.stats
from sandbox.util.Latex import Latex
from sandbox.util.PathDefaults import PathDefaults
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
import matplotlib
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
#Produce latex tables from the benchmark results
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def getIdealWins(errors, testErrors, p=0.01):
"""
Figure out whether the ideal error obtained using the test set is an improvement
over model selection using CV.
"""
winsShape = list(errors.shape[1:-1])
winsShape.append(3)
stdWins = numpy.zeros(winsShape, numpy.int)
for i in range(len(sampleSizes)):
for j in range(foldsSet.shape[0]):
s1 = errors[:, i, j, 0]
s2 = testErrors[:, i]
s1Mean = numpy.mean(s1)
s2Mean = numpy.mean(s2)
t, prob = scipy.stats.wilcoxon(s1, s2)
if prob < p:
if s1Mean > s2Mean:
stdWins[i, j, 2] = 1
elif s1Mean < s2Mean:
stdWins[i, j, 0] = 1
else:
print("Test draw samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
stdWins[i, j, 1] = 1
return stdWins
def getWins(errors, p = 0.01):
"""
    Compute the number of wins/ties/losses of each method versus CV.
"""
#Shape is realisations, len(sampleSizes), foldsSet.shape[0], numMethods
winsShape = list(errors.shape[1:])
winsShape.append(3)
stdWins = numpy.zeros(winsShape, numpy.int)
meanErrors = numpy.mean(errors, 0)
for i in range(len(sampleSizes)):
for j in range(foldsSet.shape[0]):
for k in range(meanErrors.shape[2]):
s1 = errors[:, i, j, 0]
s2 = errors[:, i, j, k]
s1Mean = numpy.mean(s1)
s2Mean = numpy.mean(s2)
t, prob = scipy.stats.wilcoxon(s1, s2)
if prob < p:
if s1Mean > s2Mean:
if k==4 and j==0:
print("PenVF win \\alpha=1.0, samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
if k==1 and j==0:
print("PenVF+ win samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
stdWins[i, j, k, 2] = 1
elif s1Mean < s2Mean:
if k==4 and j==0:
print("PenVF loss \\alpha=1.0, samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
if k==1 and j==0:
print("PenVF+ loss samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
stdWins[i, j, k, 0] = 1
else:
stdWins[i, j, k, 1] = 1
return stdWins
def getRowNames(cvScalings, idealError=False):
"""
    Return a list of the method types.
"""
rowNames = [""]
for j in range(sampleSizes.shape[0]):
rowNames.append("Std" + " $m=" + str(sampleSizes[j]) + "$")
rowNames.append("PenVF+" + " $m=" + str(sampleSizes[j]) + "$")
for k in range(cvScalings.shape[0]):
rowNames.append("PenVF" + " $m=" + str(sampleSizes[j]) + "$ $\\alpha=" + str(cvScalings[k]) + "$")
if idealError:
rowNames.append("Test $m=" + str(sampleSizes[j]) + "$")
return rowNames
def getLatexTable(measures, cvScalings, idealMeasures):
rowNames = getRowNames(cvScalings, True)
table = Latex.array1DToRow(foldsSet) + "\\\\ \n"
for j in range(sampleSizes.shape[0]):
meanMeasures = numpy.mean(measures, 0)
stdMeasures = numpy.std(measures, 0)
table += Latex.array2DToRows(meanMeasures[j, :, :].T, stdMeasures[j, :, :].T) + "\n"
meanIdealMeasures = numpy.mean(idealMeasures, 0)
stdIdealMeasures = numpy.std(idealMeasures, 0)
table += Latex.array2DToRows(numpy.ones((1, len(foldsSet)))*meanIdealMeasures[j], numpy.ones((1, len(foldsSet)))*stdIdealMeasures[j]) + "\n"
table = Latex.addRowNames(rowNames, table)
return table, meanMeasures, stdMeasures
def summary(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix, gridResultsSuffix="GridResults"):
"""
Print the errors for all results plus a summary.
"""
numMethods = (1+(cvScalings.shape[0]+1))
numDatasets = len(datasetNames)
overallErrors = numpy.zeros((numDatasets, len(sampleMethods), sampleSizes.shape[0], foldsSet.shape[0], numMethods))
overallStdWins = numpy.zeros((len(sampleMethods), len(sampleSizes), foldsSet.shape[0], numMethods+1, 3), numpy.int)
overallErrorsPerSampMethod = numpy.zeros((numDatasets, len(sampleMethods), len(sampleSizes), numMethods), numpy.float)
table1 = ""
table2 = ""
table3 = ""
for i in range(len(datasetNames)):
table3Error = numpy.zeros((2, len(sampleMethods)))
table3Stds = numpy.zeros((2, len(sampleMethods)))
for j in range(len(sampleMethods)):
print("="*50 + "\n" + datasetNames[i] + "-" + sampleMethods[j] + "\n" + "="*50 )
outfileName = outputDir + datasetNames[i] + sampleMethods[j] + fileNameSuffix + ".npz"
try:
data = numpy.load(outfileName)
errors = data["arr_0"]
params = data["arr_1"]
meanErrorGrids = data["arr_2"]
stdErrorGrids = data["arr_3"]
meanApproxGrids = data["arr_4"]
stdApproxGrids = data["arr_5"]
#Load ideal results
outfileName = outputDir + datasetNames[i] + gridResultsSuffix + ".npz"
data = numpy.load(outfileName)
idealErrors = data["arr_0"]
errorTable, meanErrors, stdErrors = getLatexTable(errors, cvScalings, idealErrors)
wins = getWins(errors)
idealWins = getIdealWins(errors, idealErrors)
excessError = numpy.zeros(errors.shape)
for k in range(errors.shape[1]):
excessError[:, k, :, :] = errors[:, k, :, :] - numpy.tile(errors[:, k, :, 0, numpy.newaxis], (1, 1, numMethods))
meanExcessError = numpy.mean(excessError, 0)
stdExcessError = numpy.std(excessError, 0)
excessErrorTable, meanExcessErrors, stdExcessErrors = getLatexTable(excessError, cvScalings, idealErrors)
overallErrorsPerSampMethod[i, j, :, :] = numpy.mean(meanErrors, 1)
overallErrors[i, j, :, :, :] = meanExcessError
overallStdWins[j, :, :, 0:-1, :] += wins
overallStdWins[j, :, :, -1, :] += idealWins
print(errorTable)
#print("Min error is: " + str(numpy.min(meanErrors)))
#print("Max error is: " + str(numpy.max(meanErrors)))
#print("Mean error is: " + str(numpy.mean(meanErrors)) + "\n")
#This is a table with V=10, alpha=1 and CV sampling
sliceFoldIndex = 0
print(meanErrors[0, 1, 0])
numSliceMethods = 3
table1Error = numpy.zeros(len(sampleSizes)*numSliceMethods)
table1Std = numpy.zeros(len(sampleSizes)*numSliceMethods)
for k in range(len(sampleSizes)):
table1Error[k*numSliceMethods] = meanErrors[k, sliceFoldIndex, 0]
table1Error[k*numSliceMethods+1] = meanErrors[k, sliceFoldIndex, 1]
table1Error[k*numSliceMethods+2] = meanErrors[k, sliceFoldIndex, 4]
table1Std[k*numSliceMethods] = stdErrors[k, sliceFoldIndex, 0]
table1Std[k*numSliceMethods+1] = stdErrors[k, sliceFoldIndex, 1]
table1Std[k*numSliceMethods+2] = stdErrors[k, sliceFoldIndex, 4]
if j == 0:
table1 += datasetNames[i] + " & " + Latex.array2DToRows(numpy.array([table1Error]), numpy.array([table1Std])) + "\n"
#See how alpha varies with V=10, CV sampling
                table2Error = numpy.zeros(numMethods-2)
                table2Std = numpy.zeros(numMethods-2)
for s in range(len(sampleSizes)):
table2Error = meanErrors[s, sliceFoldIndex, 2:]
table2Std = stdErrors[s, sliceFoldIndex, 2:]
if j == 0:
table2 += datasetNames[i] + " $m=" + str(sampleSizes[s]) + "$ & " + Latex.array2DToRows(numpy.array([table2Error]), numpy.array([table2Std])) + "\n"
"""
#See how each sample method effects CV and pen alpha=1
fourFoldIndex = 4
hundredMIndex = 1
table3Error[0, j] = meanErrors[hundredMIndex, fourFoldIndex, 0]
table3Error[1, j] = meanErrors[hundredMIndex, fourFoldIndex, 3]
table3Stds[0, j] = stdErrors[hundredMIndex, fourFoldIndex, 0]
table3Stds[1, j] = stdErrors[hundredMIndex, fourFoldIndex, 3]
"""
except IOError:
print("Failed to open file: " + outfileName)
table3 += Latex.addRowNames([datasetNames[i] + " Std ", datasetNames[i] + " PenVF "], Latex.array2DToRows(table3Error, table3Stds))
datasetMeanErrors = Latex.listToRow(sampleMethods) + "\n"
for j in range(len(sampleSizes)):
datasetMeanErrors += Latex.array2DToRows(overallErrorsPerSampMethod[i, :, j, :].T) + "\n"
datasetMeanErrors = Latex.addRowNames(getRowNames(cvScalings), datasetMeanErrors)
print(datasetMeanErrors)
print("="*50 + "\n" + "Sliced Tables" + "\n" + "="*50)
print(table1 + "\n")
print(table2 + "\n")
print(table3)
print("="*50 + "\n" + "Overall" + "\n" + "="*50)
overallMeanErrors = numpy.mean(overallErrors, 0)
overallStdErrors = numpy.std(overallErrors, 0)
for i in range(len(sampleMethods)):
print("-"*20 + sampleMethods[i] + "-"*20)
overallErrorTable = Latex.array1DToRow(foldsSet) + "\\\\ \n"
overallWinsTable = Latex.array1DToRow(foldsSet) + " & Total & " +Latex.array1DToRow(foldsSet) + " & Total \\\\ \n"
rowNames = getRowNames(cvScalings)
for j in range(sampleSizes.shape[0]):
overallErrorTable += Latex.array2DToRows(overallMeanErrors[i, j, :, :].T, overallStdErrors[i, j, :, :].T, bold=overallMeanErrors[i, j, :, :].T<0) + "\n"
tiesWins = numpy.r_[overallStdWins[i, j, :, :, 0], overallStdWins[i, j, :, :, 1], overallStdWins[i, j, :, :, 2]]
overallWinsTable += Latex.array2DToRows(tiesWins.T) + "\n"
overallErrorTable = Latex.addRowNames(rowNames, overallErrorTable)
rowNames = getRowNames(cvScalings, True)
overallWinsTable = Latex.addRowNames(rowNames, overallWinsTable)
print(Latex.latexTable(overallWinsTable, "Wins for " + sampleMethods[i], True))
print(Latex.latexTable(overallErrorTable.replace("0.", "."), "Excess errors for " + sampleMethods[i], True))
#print(overallWinsTable)
#print(overallErrorTable)
#Now print the mean errors for all datasets
datasetMeanErrors = Latex.listToRow(sampleMethods) + "\n"
overallErrorsPerSampMethod = numpy.mean(overallErrorsPerSampMethod[:, :, :, :], 0)
for j in range(len(sampleSizes)):
datasetMeanErrors += Latex.array2DToRows(overallErrorsPerSampMethod[:, j, :].T) + "\n"
datasetMeanErrors = Latex.addRowNames(getRowNames(cvScalings), datasetMeanErrors)
print(datasetMeanErrors)
def plotResults(datasetName, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
"""
Plots the errors for a particular dataset on a bar graph.
"""
for k in range(len(sampleMethods)):
outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
data = numpy.load(outfileName)
errors = data["arr_0"]
meanMeasures = numpy.mean(errors, 0)
for i in range(sampleSizes.shape[0]):
            plt.figure(k*sampleSizes.shape[0] + i)
plt.title("n="+str(sampleSizes[i]) + " " + sampleMethods[k])
for j in range(errors.shape[3]):
plt.plot(foldsSet, meanMeasures[i, :, j])
plt.xlabel("Folds")
plt.ylabel('Error')
labels = ["VFCV", "PenVF+"]
labels.extend(["VFP s=" + str(x) for x in cvScalings])
plt.legend(tuple(labels))
plt.show()
def plotAlphas(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
"""
Plot the variation in the error with alpha for penalisation.
"""
for i, datasetName in enumerate(datasetNames):
#plt.figure(i)
for k in range(len(sampleMethods)):
outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
data = numpy.load(outfileName)
errors = data["arr_0"]
meanMeasures = numpy.mean(errors, 0)
foldInd = 4
for i in range(sampleSizes.shape[0]):
plt.plot(cvScalings, meanMeasures[i, foldInd, 2:8], next(linecycler), label="m="+str(sampleSizes[i]))
plt.xlabel("Alpha")
plt.ylabel('Error')
xmin, xmax = cvScalings[0], cvScalings[-1]
plt.xlim((xmin,xmax))
plt.legend(loc="upper left")
plt.show()
def plotPenalty(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
"""
Plot the approximated versus ideal penalty.
"""
gammas = numpy.array(numpy.round(2**numpy.arange(1, 7.5, 0.5)-1), dtype=numpy.int)
for i, datasetName in enumerate(datasetNames):
outfileName = outputDir + datasetNames[i] + "GridResults.npz"
data = numpy.load(outfileName)
meanIdealPenGrids = data["arr_4"]
for k in range(len(sampleMethods)):
outfileName = outputDir + datasetNames[i] + sampleMethods[k] + "Results.npz"
data = numpy.load(outfileName)
meanApproxGrids = data["arr_4"]
foldInd = 0
methodInd1 = 1
methodInd2 = 4
for i in range(sampleSizes.shape[0]-1):
plt.figure(i)
linecycler = cycle(lines)
idealGrid = meanIdealPenGrids[i, :]
approxGrid1 = meanApproxGrids[i, foldInd, methodInd1, :]
approxGrid2 = meanApproxGrids[i, foldInd, methodInd2, :]
plt.plot(numpy.log2(gammas), idealGrid, next(linecycler), label="Ideal " )
plt.plot(numpy.log2(gammas), approxGrid1, next(linecycler), label="PenVF+ " )
plt.plot(numpy.log2(gammas), approxGrid2, next(linecycler), label="PenVF " )
plt.xlabel("log(t)")
plt.ylabel('Penalty')
plt.legend(loc="upper left")
plt.show()
def plotErrorGrids(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
"""
Plot the ideal, CV, PenVF errors.
"""
gammas = numpy.array(numpy.round(2**numpy.arange(1, 7.5, 0.5)-1), dtype=numpy.int)
for i, datasetName in enumerate(datasetNames):
plt.figure(i)
outfileName = outputDir + datasetNames[i] + "GridResults.npz"
data = numpy.load(outfileName)
meanIdealErrorGrids = data["arr_2"]
for k in range(len(sampleMethods)):
outfileName = outputDir + datasetNames[i] + sampleMethods[k] + "Results.npz"
data = numpy.load(outfileName)
meanErrorGrids = data["arr_2"]
foldInd = 0
methodInd1 = 0
methodInd2 = 1
methodInd3 = 4
for i in range(sampleSizes.shape[0]-2):
idealGrid = meanIdealErrorGrids[i, :]
print()
print("min ideal = " + str(gammas[numpy.argmin(idealGrid)]))
print("min VFCV = " + str(gammas[numpy.argmin(meanErrorGrids[i, foldInd, methodInd1, :])]))
print("min PenVF+ = " + str(gammas[numpy.argmin(meanErrorGrids[i, foldInd, methodInd2, :])]))
print("min PenVF = " + str(gammas[numpy.argmin(meanErrorGrids[i, foldInd, methodInd3, :])]))
approxGrid1 = meanErrorGrids[i, foldInd, methodInd1, :]
approxGrid2 = meanErrorGrids[i, foldInd, methodInd2, :]
approxGrid3 = meanErrorGrids[i, foldInd, methodInd3, :]
plt.plot(numpy.log2(gammas), idealGrid, next(linecycler), label="Ideal ")
plt.plot(numpy.log2(gammas), approxGrid1, next(linecycler), label="CV " )
plt.plot(numpy.log2(gammas), approxGrid2, next(linecycler), label="PenVF+ " )
plt.plot(numpy.log2(gammas), approxGrid3, next(linecycler), label="PenVF " )
plt.xlabel("log(t)")
plt.ylabel('Error')
plt.legend(loc="lower left")
plt.show()
showCART = True
showSVR = False
from itertools import cycle
lines = ["k-","k--","k-.","k:","k-x", "k-+"]
linecycler = cycle(lines)
if showSVR:
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/SVR/"
sampleSizes = numpy.array([50, 100, 200])
sampleMethods = ["CV"]
cvScalings = numpy.arange(0.6, 1.61, 0.2)
foldsSet = numpy.arange(2, 13, 2)
datasetNames = ModelSelectUtils.getRegressionDatasets()
fileNameSuffix = 'Results'
summary(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
plotDatasetNames = [datasetNames[7]]
plotAlphas(plotDatasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
sampleSizes = numpy.array([25, 50, 100])
sampleMethods = ["CV"]
cvScalings = numpy.arange(0.6, 1.61, 0.2)
foldsSet = numpy.arange(10, 51, 10)
datasetNames = ModelSelectUtils.getRegressionDatasets()
fileNameSuffix = "ResultsExt"
summary(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix, "GridResultsExt")
if showCART:
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
#First output the fine grained results
sampleSizes = numpy.array([50, 100, 200])
sampleMethods = ["CV"]
cvScalings = numpy.arange(0.6, 1.61, 0.2)
foldsSet = numpy.arange(2, 13, 2)
datasetNames = ModelSelectUtils.getRegressionDatasets()
fileNameSuffix = 'Results'
summary(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
print(datasetNames)
plotDatasetNames = [datasetNames[7]]
plotAlphas(plotDatasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
#plotDatasetNames = [datasetNames[0]]
#plotPenalty(plotDatasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
#plotDatasetNames = [datasetNames[0]]
#plotErrorGrids(plotDatasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
#plotResults("add10", sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix)
#Now run some extended results
sampleSizes = numpy.array([500])
sampleMethods = ["CV"]
cvScalings = numpy.arange(0.6, 1.61, 0.2)
foldsSet = numpy.arange(2, 13, 2)
datasetNames = ModelSelectUtils.getRegressionDatasets()
fileNameSuffix = "ResultsExt"
summary(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix, "GridResultsExt")
| gpl-3.0 |
JosmanPS/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
    A cross-validation generator splits the whole dataset k times into training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
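    Examples
    --------
    A minimal sketch of the intended call pattern; the estimator and the
    synthetic dataset below are illustrative only:

    >>> import numpy as np
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.svm import SVC
    >>> from sklearn.learning_curve import learning_curve
    >>> X, y = make_classification(n_samples=200, random_state=0)
    >>> train_sizes_abs, train_scores, test_scores = learning_curve(
    ...     SVC(kernel="linear"), X, y,
    ...     train_sizes=np.linspace(0.1, 1.0, 5), cv=3)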
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
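    Examples
    --------
    A minimal sketch varying the regularization strength ``C`` of a linear
    SVC; the estimator, dataset and parameter grid are illustrative only:

    >>> from sklearn.datasets import make_classification
    >>> from sklearn.svm import SVC
    >>> from sklearn.learning_curve import validation_curve
    >>> X, y = make_classification(n_samples=200, random_state=0)
    >>> train_scores, test_scores = validation_curve(
    ...     SVC(kernel="linear"), X, y, param_name="C",
    ...     param_range=[0.1, 1.0, 10.0], cv=3)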
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
AndreasMadsen/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 11 | 2404 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(tf.test.TestCase):
def testPandasInputFn(self):
if not HAS_PANDAS:
return
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y_noindex = pd.Series(np.arange(-32, -28))
y = pd.Series(np.arange(-32, -28), index=index)
with self.test_session() as session:
with self.assertRaises(ValueError):
failing_input_fn = pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['index'], [100, 101])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
hainm/statsmodels | examples/python/regression_plots.py | 33 | 9585 |
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$\mathrm{var}(\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j \neq i}\hat{\epsilon}_j^2$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
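# The quantities behind the influence plot can also be pulled out directly. This is a
# small illustrative check (not part of the original walkthrough); it uses the
# OLSInfluence helper attached to the fitted results.
influence = prestige_model.get_influence()
print(influence.resid_studentized_external[:5])  # externally studentized residuals
print(influence.hat_matrix_diag[:5])             # leverage h_ii
print(influence.cooks_distance[0][:5])           # Cook's distance per observation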
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this through using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoskedasticity and <br />
# linearity.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fix, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
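# For intuition, the added-variable plot for income can be rebuilt by hand. This is a
# quick sketch rather than the library implementation: regress both prestige and income
# on the remaining regressor and plot residuals against residuals.
res_y = ols("prestige ~ education", data=prestige).fit().resid
res_x = ols("income ~ education", data=prestige).fit().resid
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(res_x, res_y)
ax.set_xlabel("e(income | education)")
ax.set_ylabel("e(prestige | education)")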
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i \text{ }\text{ }$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
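# The partial residuals underlying the CCPR plot are easy to compute by hand. The
# following is an illustrative sketch (component plus residual for education), not a
# replacement for plot_ccpr.
b_edu = prestige_model.params["education"]
partial_resid = prestige_model.resid + b_edu * prestige["education"]
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(prestige["education"], partial_resid)
ax.set_xlabel("education")
ax.set_ylabel("component plus residual")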
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Though the data here is not the same as in that example. You could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State", inplace=True).dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this examples.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# There aren't yet any influence diagnostics as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
| bsd-3-clause |
xapharius/HadoopML | Engine/src/examples/linear_regression.py | 2 | 1979 | import numpy as np
from algorithms.linearRegression.LinearRegressionFactory import LinearRegressionFactory
from algorithms.linearRegression.scipy_linreg import SciPyLinReg
from algorithms.linearRegression.scipy_linreg_factory import SciPyLinRegFactory
from datahandler.numerical.NumericalDataHandler import NumericalDataHandler
from engine.constants.run_type import *
from engine.engine import Engine
from validator.PredictionValidator import PredictionValidator
import matplotlib.pyplot as plt
if __name__ == '__main__':
print("=== Linear Regression Example ===")
nr_params = 11
nr_label_dim = 1
run_type = LOCAL
data_file = 'hdfs:///user/linda/ml/data/winequality-red.csv' if run_type == HADOOP else '../data/wine-quality/winequality-red.csv'
input_scalling = None
target_scalling = None
print( "\n data: " + data_file
+ "\n params: " + str(nr_params)
+ "\n label dim: " + str(nr_label_dim)
+ "\n run type: " + run_type
+ "\n input scalling: " + str(input_scalling)
+ "\n target scalling: " + str(target_scalling)
+ "\n"
)
# 1. define algorithm
# regression = LinearRegressionFactory(nr_params)
regression = SciPyLinRegFactory(SciPyLinReg.RIDGE)
# 2. set data handler (pre-processing, normalization, data set creation)
data_handler = NumericalDataHandler(nr_params, nr_label_dim, input_scalling=input_scalling, target_scalling=target_scalling)
# 3. run
engine = Engine(regression, data_file, data_handler=data_handler, verbose=True)
trained_alg = engine.start(_run_type=run_type)
# 4. validate result
validation_stats = engine.validate(trained_alg, PredictionValidator(), _run_type=run_type)
targets = np.array(validation_stats['targets'])
pred = np.array(validation_stats['pred'])
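    # quick numeric summary of the validation run (illustrative addition; assumes
    # 'targets' and 'pred' are aligned 1-D arrays as built above)
    rmse = np.sqrt(np.mean((targets - pred) ** 2))
    print("validation RMSE: %.4f" % rmse)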
plt.plot(targets, 'go')
plt.plot(pred, 'r+')
plt.show()
| mit |
Richert/BrainNetworks | CMC/analysis/EIC_criticality.py | 1 | 1745 | import matplotlib.pyplot as plt
from CMC.config import EIC
import numpy as np
# parameters
############
T = 10000.0 # simulation time (s)
dt = 1e-1 # integration step-size (s)
Ne = 100 # number of excitatory neurons
Ni = 20 # number of inhibitory neurons
# connectivities
################
W_ee = np.random.uniform(0., 1.0, size=(Ne, Ne)) # connections between excitatory neurons
W_ee = (W_ee >= 0.9) * 1.0
W_ii = np.zeros((Ni, Ni)) # connections between inhibitory neurons
W_ei = np.random.uniform(0., 1.0, size=(Ne, Ni)) # connections from inhibitory to excitatory neurons
W_ie = np.random.uniform(0., 1.0, size=(Ni, Ne)) # connections from excitatory to inhibitory neurons
# input
steps = int(T/dt)
Ie = 500.0 + np.random.randn(steps, Ne) * 50.0 * np.sqrt(dt) # input current for excitatory neurons
Ii = 440.0 + np.random.randn(steps, Ni) * 44.0 * np.sqrt(dt) # input current for inhibitory neurons
# ei-circuit setup and simulation
#################################
net = EIC(W_ee=W_ee, W_ii=W_ii, W_ei=W_ei, W_ie=W_ie, k=200.0, dt=dt) # setup of network
v, s = net.run(T=T, I_e=Ie, I_i=Ii) # network simulation
spikes = [np.argwhere(s_tmp > 0).squeeze() for s_tmp in s.T] # spike extraction
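# quick population statistic from the spike matrix (an illustrative addition; assumes
# rows of s index time steps and columns index neurons, with entries > 0 marking spikes)
spike_counts = (s > 0).sum(axis=0)
print("mean firing rate: %.3f spikes per unit time" % np.mean(spike_counts / T))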
# plotting
##########
fig, axes = plt.subplots(ncols=2, figsize=(15, 8))
axes[0].plot(v[:-1]) # plot membrane potentials
axes[0].set_ylim(-80.0, 20.0)
try:
axes[1].eventplot(spikes, colors='k', lineoffsets=1, linelengths=1) # plot spikes
axes[1].set_xlim(0, steps)
except TypeError:
pass
plt.tight_layout()
plt.show()
| apache-2.0 |
Jimmy-Morzaria/scikit-learn | setup.py | 7 | 6416 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
mwaskom/seaborn | examples/heat_scatter.py | 2 | 1187 | """
Scatterplot heatmap
-------------------
_thumb: .5, .5
"""
import seaborn as sns
sns.set_theme(style="whitegrid")
# Load the brain networks dataset, select subset, and collapse the multi-index
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
used_networks = [1, 5, 6, 7, 8, 12, 13, 17]
used_columns = (df.columns
.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
df.columns = df.columns.map("-".join)
# Compute a correlation matrix and convert to long-form
corr_mat = df.corr().stack().reset_index(name="correlation")
# Draw each cell as a scatter point with varying size and color
g = sns.relplot(
data=corr_mat,
x="level_0", y="level_1", hue="correlation", size="correlation",
palette="vlag", hue_norm=(-1, 1), edgecolor=".7",
height=10, sizes=(50, 250), size_norm=(-.2, .8),
)
# Tweak the figure to finalize
g.set(xlabel="", ylabel="", aspect="equal")
g.despine(left=True, bottom=True)
g.ax.margins(.02)
for label in g.ax.get_xticklabels():
label.set_rotation(90)
for artist in g.legend.legendHandles:
artist.set_edgecolor(".7")
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/gtktools.py | 8 | 19346 | """
Some gtk specific tools and widgets
* rec2gtk : put record array in GTK treeview - requires gtk
Example usage
import matplotlib.mlab as mlab
import mpl_toolkits.gtktools as gtktools
r = mlab.csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = mlab.FormatFloat(2),
change = mlab.FormatPercent(2),
cost = mlab.FormatThousands(2),
)
exceltools.rec2excel(r, 'test.xls', formatd=formatd)
mlab.rec2csv(r, 'test.csv', formatd=formatd)
import gtk
scroll = gtktools.rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import copy
import gtk, gobject
import numpy as npy
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
def error_message(msg, parent=None, title=None):
"""
create an error message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
else:
dialog.set_title('Error!')
dialog.show()
dialog.run()
dialog.destroy()
return None
def simple_message(msg, parent=None, title=None):
"""
create a simple message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
dialog.show()
dialog.run()
dialog.destroy()
return None
def gtkformat_factory(format, colnum):
"""
    copy the format, perform any overrides, and attach gtk style attrs
xalign = 0.
cell = None
"""
if format is None: return None
format = copy.copy(format)
format.xalign = 0.
format.cell = None
def negative_red_cell(column, cell, model, thisiter):
val = model.get_value(thisiter, colnum)
try: val = float(val)
except: cell.set_property('foreground', 'black')
else:
if val<0:
cell.set_property('foreground', 'red')
else:
cell.set_property('foreground', 'black')
if isinstance(format, mlab.FormatFloat) or isinstance(format, mlab.FormatInt):
format.cell = negative_red_cell
format.xalign = 1.
elif isinstance(format, mlab.FormatDate):
format.xalign = 1.
return format
class SortedStringsScrolledWindow(gtk.ScrolledWindow):
"""
A simple treeview/liststore assuming all columns are strings.
Supports ascending/descending sort by clicking on column header
"""
def __init__(self, colheaders, formatterd=None):
"""
xalignd if not None, is a dict mapping col header to xalignent (default 1)
formatterd if not None, is a dict mapping col header to a ColumnFormatter
"""
gtk.ScrolledWindow.__init__(self)
self.colheaders = colheaders
self.seq = None # not initialized with accts
self.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_AUTOMATIC)
types = [gobject.TYPE_STRING] * len(colheaders)
model = self.model = gtk.ListStore(*types)
treeview = gtk.TreeView(self.model)
treeview.show()
treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
treeview.set_rules_hint(True)
class Clicked:
def __init__(self, parent, i):
self.parent = parent
self.i = i
self.num = 0
def __call__(self, column):
ind = []
dsu = []
for rownum, thisiter in enumerate(self.parent.iters):
val = model.get_value(thisiter, self.i)
try: val = float(val.strip().rstrip('%'))
except ValueError: pass
if mlab.safe_isnan(val): val = npy.inf # force nan to sort uniquely
dsu.append((val, rownum))
dsu.sort()
if not self.num%2: dsu.reverse()
vals, otherind = list(zip(*dsu))
ind.extend(otherind)
self.parent.model.reorder(ind)
newiters = []
for i in ind:
newiters.append(self.parent.iters[i])
self.parent.iters = newiters[:]
for i, thisiter in enumerate(self.parent.iters):
key = tuple([self.parent.model.get_value(thisiter, j) for j in range(len(colheaders))])
self.parent.rownumd[i] = key
self.num+=1
if formatterd is None:
formatterd = dict()
formatterd = formatterd.copy()
for i, header in enumerate(colheaders):
renderer = gtk.CellRendererText()
if header not in formatterd:
formatterd[header] = ColumnFormatter()
formatter = formatterd[header]
column = gtk.TreeViewColumn(header, renderer, text=i)
renderer.set_property('xalign', formatter.xalign)
renderer.set_property('editable', True)
renderer.connect("edited", self.position_edited, i)
column.connect('clicked', Clicked(self, i))
column.set_property('clickable', True)
if formatter.cell is not None:
column.set_cell_data_func(renderer, formatter.cell)
treeview.append_column(column)
self.formatterd = formatterd
self.lastcol = column
self.add(treeview)
self.treeview = treeview
self.clear()
def position_edited(self, renderer, path, newtext, position):
#print path, position
self.model[path][position] = newtext
def clear(self):
self.iterd = dict()
self.iters = [] # an ordered list of iters
self.rownumd = dict() # a map from rownum -> symbol
self.model.clear()
self.datad = dict()
def flat(self, row):
seq = []
for i,val in enumerate(row):
formatter = self.formatterd.get(self.colheaders[i])
seq.extend([i,formatter.tostr(val)])
return seq
def __delete_selected(self, *unused): # untested
        keyd = dict([(thisiter, key) for key, thisiter in self.iterd.items()])
for row in self.get_selected():
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.iters.remove(thisiter)
for i, thisiter in enumerate(self.iters):
self.rownumd[i] = keyd[thisiter]
def delete_row(self, row):
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.rownumd[len(self.iters)] = key
self.iters.remove(thisiter)
for rownum, thiskey in list(six.iteritems(self.rownumd)):
if thiskey==key: del self.rownumd[rownum]
def add_row(self, row):
thisiter = self.model.append()
self.model.set(thisiter, *self.flat(row))
key = tuple(row)
self.datad[key] = row
self.iterd[key] = thisiter
self.rownumd[len(self.iters)] = key
self.iters.append(thisiter)
def update_row(self, rownum, newrow):
key = self.rownumd[rownum]
thisiter = self.iterd[key]
newkey = tuple(newrow)
self.rownumd[rownum] = newkey
del self.datad[key]
del self.iterd[key]
self.datad[newkey] = newrow
self.iterd[newkey] = thisiter
self.model.set(thisiter, *self.flat(newrow))
def get_row(self, rownum):
key = self.rownumd[rownum]
return self.datad[key]
def get_selected(self):
selected = []
def foreach(model, path, iter, selected):
selected.append(model.get_value(iter, 0))
self.treeview.get_selection().selected_foreach(foreach, selected)
return selected
def rec2gtk(r, formatd=None, rownum=0, autowin=True):
"""
formatd is a dictionary mapping dtype name -> mlab.Format instances
This function creates a SortedStringsScrolledWindow (derived
from gtk.ScrolledWindow) and returns it. if autowin is True,
a gtk.Window is created, attached to the
SortedStringsScrolledWindow instance, shown and returned. If
autowin=False, the caller is responsible for adding the
SortedStringsScrolledWindow instance to a gtk widget and
showing it.
"""
if formatd is None:
formatd = dict()
formats = []
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = mlab.defaultformatd.get(dt.type, mlab.FormatObj())
#print 'gtk fmt factory', i, name, format, type(format)
format = gtkformat_factory(format, i)
formatd[name] = format
colheaders = r.dtype.names
scroll = SortedStringsScrolledWindow(colheaders, formatd)
ind = npy.arange(len(r.dtype.names))
for row in r:
scroll.add_row(row)
if autowin:
win = gtk.Window()
win.set_default_size(800,600)
#win.set_geometry_hints(scroll)
win.add(scroll)
win.show_all()
scroll.win = win
return scroll
class RecListStore(gtk.ListStore):
"""
A liststore as a model of an editable record array.
attributes:
* r - the record array with the edited values
* formatd - the list of mlab.FormatObj instances, with gtk attachments
* stringd - a dict mapping dtype names to a list of valid strings for the combo drop downs
* callbacks - a matplotlib.cbook.CallbackRegistry. Connect to the cell_changed with
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
cid = liststore.callbacks.connect('cell_changed', mycallback)
"""
def __init__(self, r, formatd=None, stringd=None):
"""
r is a numpy record array
formatd is a dict mapping dtype name to mlab.FormatObj instances
stringd, if not None, is a dict mapping dtype names to a list of
valid strings for a combo drop down editor
"""
if stringd is None:
stringd = dict()
if formatd is None:
formatd = mlab.get_formatd(r)
self.stringd = stringd
self.callbacks = cbook.CallbackRegistry(['cell_changed'])
self.r = r
self.headers = r.dtype.names
self.formats = [gtkformat_factory(formatd.get(name, mlab.FormatObj()),i)
for i,name in enumerate(self.headers)]
# use the gtk attached versions
self.formatd = formatd = dict(zip(self.headers, self.formats))
types = []
for format in self.formats:
if isinstance(format, mlab.FormatBool):
types.append(gobject.TYPE_BOOLEAN)
else:
types.append(gobject.TYPE_STRING)
self.combod = dict()
if len(stringd):
types.extend([gobject.TYPE_INT]*len(stringd))
keys = list(six.iterkeys(stringd))
keys.sort()
valid = set(r.dtype.names)
for ikey, key in enumerate(keys):
assert(key in valid)
combostore = gtk.ListStore(gobject.TYPE_STRING)
for s in stringd[key]:
combostore.append([s])
self.combod[key] = combostore, len(self.headers)+ikey
gtk.ListStore.__init__(self, *types)
for row in r:
vals = []
for formatter, val in zip(self.formats, row):
if isinstance(formatter, mlab.FormatBool):
vals.append(val)
else:
vals.append(formatter.tostr(val))
if len(stringd):
# todo, get correct index here?
vals.extend([0]*len(stringd))
self.append(vals)
def position_edited(self, renderer, path, newtext, position):
position = int(position)
format = self.formats[position]
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
try: newval = format.fromstr(newtext)
except ValueError:
msg = cbook.exception_to_str('Error converting "%s"'%newtext)
error_message(msg, title='Error')
return
self.r[rownum][colname] = newval
self[path][position] = format.tostr(newval)
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
def position_toggled(self, cellrenderer, path, position):
position = int(position)
format = self.formats[position]
newval = not cellrenderer.get_active()
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
self.r[rownum][colname] = newval
self[path][position] = newval
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
class RecTreeView(gtk.TreeView):
"""
An editable tree view widget for record arrays
"""
def __init__(self, recliststore, constant=None):
"""
build a gtk.TreeView to edit a RecListStore
constant, if not None, is a list of dtype names which are not editable
"""
self.recliststore = recliststore
gtk.TreeView.__init__(self, recliststore)
combostrings = set(recliststore.stringd.keys())
if constant is None:
constant = []
constant = set(constant)
for i, header in enumerate(recliststore.headers):
formatter = recliststore.formatd[header]
coltype = recliststore.get_column_type(i)
if coltype==gobject.TYPE_BOOLEAN:
renderer = gtk.CellRendererToggle()
if header not in constant:
renderer.connect("toggled", recliststore.position_toggled, i)
renderer.set_property('activatable', True)
elif header in combostrings:
renderer = gtk.CellRendererCombo()
renderer.connect("edited", recliststore.position_edited, i)
combostore, listind = recliststore.combod[header]
renderer.set_property("model", combostore)
renderer.set_property('editable', True)
else:
renderer = gtk.CellRendererText()
if header not in constant:
renderer.connect("edited", recliststore.position_edited, i)
renderer.set_property('editable', True)
if formatter is not None:
renderer.set_property('xalign', formatter.xalign)
tvcol = gtk.TreeViewColumn(header)
self.append_column(tvcol)
tvcol.pack_start(renderer, True)
if coltype == gobject.TYPE_STRING:
tvcol.add_attribute(renderer, 'text', i)
if header in combostrings:
combostore, listind = recliststore.combod[header]
tvcol.add_attribute(renderer, 'text-column', listind)
elif coltype == gobject.TYPE_BOOLEAN:
tvcol.add_attribute(renderer, 'active', i)
if formatter is not None and formatter.cell is not None:
tvcol.set_cell_data_func(renderer, formatter.cell)
self.connect("button-release-event", self.on_selection_changed)
#self.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_BOTH)
self.get_selection().set_mode(gtk.SELECTION_BROWSE)
self.get_selection().set_select_function(self.on_select)
def on_select(self, *args):
return False
def on_selection_changed(self, *args):
(path, col) = self.get_cursor()
ren = col.get_cell_renderers()[0]
if isinstance(ren, gtk.CellRendererText):
self.set_cursor_on_cell(path, col, ren, start_editing=True)
def edit_recarray(r, formatd=None, stringd=None, constant=None, autowin=True):
"""
create a RecListStore and RecTreeView and return them.
If autowin is True, create a gtk.Window, insert the treeview into
it, and return it (return value will be (liststore, treeview, win)
See RecListStore and RecTreeView for a description of the keyword args
"""
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
if autowin:
win = gtk.Window()
win.add(treeview)
win.show_all()
return liststore, treeview, win
else:
return liststore, treeview
if __name__=='__main__':
import datetime
import gtk
import numpy as np
import matplotlib.mlab as mlab
N = 10
today = datetime.date.today()
dates = [today+datetime.timedelta(days=i) for i in range(N)] # datetimes
weekdays = [d.strftime('%a') for d in dates] # strings
gains = np.random.randn(N) # floats
prices = np.random.rand(N)*1e7 # big numbers
up = gains>0 # bools
clientid = list(xrange(N)) # ints
r = np.rec.fromarrays([clientid, dates, weekdays, gains, prices, up],
names='clientid,date,weekdays,gains,prices,up')
# some custom formatters
formatd = mlab.get_formatd(r)
formatd['date'] = mlab.FormatDate('%Y-%m-%d')
formatd['prices'] = mlab.FormatMillions(precision=1)
    formatd['gains'] = mlab.FormatPercent(precision=2)
# use a drop down combo for weekdays
stringd = dict(weekdays=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
constant = ['clientid'] # block editing of this field
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
liststore.callbacks.connect('cell_changed', mycallback)
win = gtk.Window()
win.set_title('with full customization')
win.add(treeview)
win.show_all()
# or you just use the defaults
r2 = r.copy()
ls, tv, win2 = edit_recarray(r2)
win2.set_title('with all defaults')
gtk.main()
| gpl-2.0 |
lewisc/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_filter_test.py | 13 | 10428 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
import datetime
class DicomFilterTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
self.count = self.dicom.metadata.count()
def test_filter_one_key(self):
"""test filter with basic filter function"""
# extract a key-value pair from the first row metadata for our use
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# ask dicom to filter using our key-value filter function
self.dicom.filter(self._filter_key_values({ "PatientID" : patient_id }))
# we generate our own result to compare to dicom's
expected_result = self._filter({ "PatientID" : patient_id })
# ensure results match
self._compare_dicom_with_expected_result(expected_result)
def test_filter_multi_key(self):
"""test filter with basic filter function mult keyval pairs"""
# first we extract key-value pairs from the first row's metadata
# for our own use to generate a key-val dictionary
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
sopi_id = xml.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
key_val = { "PatientID" : patient_id, "SOPInstanceUID" : sopi_id }
# we use our filter function and ask dicom to filter
self.dicom.filter(self._filter_key_values(key_val))
# here we generate our own result
expected_result = self._filter(key_val)
# compare expected result to what dicom gave us
self._compare_dicom_with_expected_result(expected_result)
def test_filter_zero_matching_records(self):
"""test filter with filter function returns none"""
# we give dicom a filter function which filters by
# key-value and give it a key-value pair which will
# return 0 records
pandas = self.dicom.metadata.to_pandas()
self.dicom.filter(self._filter_key_values({ "PatientID" : -6 }))
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_nothing(self):
"""test filter with filter function filters nothing"""
# this filter function will return all records
self.dicom.filter(self._filter_nothing())
self.assertEqual(self.dicom.metadata.count(), self.count)
def test_filter_everything(self):
"""test filter function filter everything"""
# filter_everything filter out all of the records
self.dicom.filter(self._filter_everything())
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_timestamp_range(self):
"""test filter with timestamp range function"""
# we will test filter with a function which takes a begin and end
# date and returns all records with a study date between them
# we will set begin date to 15 years ago and end date to 5 years ago
begin_date = datetime.datetime.now() - datetime.timedelta(days=15*365)
end_date = datetime.datetime.now() - datetime.timedelta(days=5*365)
# here we will generate our own result by filtering for records
# which meet our criteria
expected_result = []
pandas = self.dicom.metadata.to_pandas()
# iterate through the rows and append all records with
# a study date between our begin and end date
for index, row in pandas.iterrows():
ascii_row = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_row)
study_date = xml_root.xpath(self.query.replace("KEYWORD", "StudyDate"))[0]
datetime_study_date = datetime.datetime.strptime(study_date, "%Y%m%d")
if datetime_study_date > begin_date and datetime_study_date < end_date:
expected_result.append(ascii_row)
# now we ask dicom to use our filter function below to return
# all records with a StudyDate within our specified range
self.dicom.filter(self._filter_timestamp_range(begin_date, end_date))
# ensure that expected result matches actual
self._compare_dicom_with_expected_result(expected_result)
def test_return_type_str(self):
"""test filter with function that returns strings"""
self.dicom.filter(self._filter_return_string())
self.assertEqual(3, self.dicom.metadata.count())
def test_return_type_int(self):
"""test filter wtih function that returns ints"""
self.dicom.filter(self._filter_return_int())
self.assertEqual(3, self.dicom.metadata.count())
def test_filter_has_bugs(self):
"""test filter with a broken filter function"""
with self.assertRaisesRegexp(Exception, "this filter is broken!"):
self.dicom.filter(self._filter_has_bugs())
self.dicom.metadata.count()
def test_filter_invalid_param(self):
"""test filter with an invalid param type"""
# should fail because filter takes a function not a keyvalue pair
with self.assertRaisesRegexp(Exception, "'dict' object is not callable"):
self.dicom.filter({ "PatientID" : "bla" })
self.dicom.metadata.count()
def test_filter_invalid_function(self):
"""test filter with function which takes more than one param"""
with self.assertRaisesRegexp(Exception, "takes exactly 2 arguments"):
self.dicom.filter(self._filter_invalid())
self.dicom.metadata.count()
def _filter_key_values(self, key_val):
"""filter by key-value"""
def _filter_key_value(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
for key in key_val:
xml_element_value = xml_root.xpath(".//DicomAttribute[@keyword='" + key + "']/Value/text()")[0]
                if xml_element_value != key_val[key]:
                    return False
            return True
return _filter_key_value
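    def _filter_key_values_usage_sketch(self):
        """illustrative sketch only (not part of the original tests): shows how
        the closure returned above is applied to a row; the PatientID value and
        the tiny XML snippet are made up for demonstration"""
        flt = self._filter_key_values({"PatientID": "12345"})
        fake_row = {"metadata": u"<root><DicomAttribute keyword='PatientID'>"
                                u"<Value>12345</Value></DicomAttribute></root>"}
        return flt(fake_row)  # True, since the key-value pair matches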
def _filter_nothing(self):
"""returns all records"""
def _filter_nothing(row):
return True
return _filter_nothing
def _filter_everything(self):
"""returns no records"""
def _filter_everything(row):
return False
return _filter_everything
def _filter_timestamp_range(self, begin_date, end_date):
"""return records within studydate date range"""
def _filter_timestamp_range(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
timestamp = xml_root.xpath(".//DicomAttribute[@keyword='StudyDate']/Value/text()")[0]
timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d")
if begin_date < timestamp and timestamp < end_date:
return True
else:
return False
return _filter_timestamp_range
def _filter_return_string(self):
"""filter function which returns str"""
def _filter_return_string(row):
return "True"
return _filter_return_string
def _filter_return_int(self):
"""filter function returns int"""
def _filter_return_int(row):
return -1
return _filter_return_int
def _filter_has_bugs(self):
"""broken filter function"""
def _filter_has_bugs(row):
raise Exception("this filter is broken!")
return _filter_has_bugs
def _filter_invalid(self):
"""filter function takes 2 params"""
# filter is invalid because it takes
# 2 parameters
def _filter_invalid(index, row):
return True
return _filter_invalid
def _filter(self, keywords):
"""filter records by key value pair"""
# here we are generating the expected result
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
for row in pandas_metadata:
ascii_xml = row.encode("ascii", "ignore")
xml = etree.fromstring(row.encode("ascii", "ignore"))
            matches = True
            for keyword in keywords:
                this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))
                if not this_row_keyword_value or this_row_keyword_value[0] != keywords[keyword]:
                    matches = False
                    break
            if matches:
                matching_records.append(ascii_xml)
return matching_records
def _compare_dicom_with_expected_result(self, expected_result):
"""compare expected result with actual result"""
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
for expected, actual in zip(expected_result, pandas_result):
actual_ascii = actual.encode("ascii", "ignore")
self.assertEqual(actual_ascii, expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
UASLab/ImageAnalysis | scripts/sandbox/lbp.py | 1 | 8814 | #!/usr/bin/python3
import argparse
import cv2
import math
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import classifier
texture_and_color = False
# goal_step = 160 # this is a tuning dial
parser = argparse.ArgumentParser(description='local binary patterns test.')
parser.add_argument('--image', required=True, help='image name')
parser.add_argument('--scale', type=float, default=0.4, help='scale image before processing')
parser.add_argument('--model', help='saved learning model name')
args = parser.parse_args()
rgb = cv2.imread(args.image, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
tmodel = classifier.Classifier()
if args.model:
tmodel.init_model(args.model + ".fit", args.model + ".data")
else:
    tmodel.init_model()  # assumption: build a fresh, untrained model when no saved model is given
tmodel.compute_lbp(rgb)
tmodel.compute_grid()
# gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
# if False:
# lab = cv2.cvtColor(rgb, cv2.COLOR_BGR2LAB)
# l, a, b = cv2.split(lab)
# # cv2 hue range: 0 - 179
# # target_hue_value = 0 # red = 0
# # t1 = np.mod((hue.astype('float') + 90), 180)
# # print('t1:', np.min(t1), np.max(t1))
# # #cv2.imshow('t1', cv2.resize(t1, (int(w*args.scale), int(h*args.scale))))
# # dist = np.abs(90 - t1)
# # print('dist:', np.min(dist), np.max(dist))
# # t2 = 255 - (dist*dist) * (255 / 90)
# # t2[t2<0] = 0
# # weight = (hue.astype('float')/255) * (sat.astype('float')/255)
# # index = (t2 * weight).astype('uint8')
# index = a
# elif False:
# hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
# hue, sat, val = cv2.split(hsv)
# # cv2 hue range: 0 - 179
# target_hue_value = 0 # red = 0
# t1 = np.mod((hue.astype('float') + 90), 180)
# print('t1:', np.min(t1), np.max(t1))
# #cv2.imshow('t1', cv2.resize(t1, (int(w*args.scale), int(h*args.scale))))
# dist = np.abs(90 - t1)
# print('dist:', np.min(dist), np.max(dist))
# t2 = 255 - (dist*dist) * (255 / 90)
# t2[t2<0] = 0
# weight = (hue.astype('float')/255) * (sat.astype('float')/255)
# index = (t2 * weight).astype('uint8')
# #index = hue
# elif False:
# # very dark pixels can map out noisily
# g, b, r = cv2.split(rgb)
# g[g==0] = 1
# r[r==0] = 1
# ng = g.astype('float') / 255.0
# nr = r.astype('float') / 255.0
# index = (nr - ng) / (nr + ng)
# print("range:", np.min(index), np.max(index))
# #index[index<0.5] = -1.0
# index = ((0.5 * index + 0.5) * 255).astype('uint8')
# elif True:
# # very dark pixels can map out noisily
# g, b, r = cv2.split(rgb)
# g[g==0] = 1 # protect against divide by zero
# ratio = (r / g).astype('float') * 0.25
# # knock out the low end
# lum = gray.astype('float') / 255
# lumf = lum / 0.15
# lumf[lumf>1] = 1
# ratio *= lumf
# #ratio[ratio<0.5] = 0
# ratio[ratio>1] = 1
# gray = (ratio*255).astype('uint8')
# index = gray
# print("range:", np.min(index), np.max(index))
(h, w) = tmodel.index.shape[:2]
# cv2.imshow('index', cv2.resize(tmodel.index, (int(w*args.scale), int(h*args.scale))))
scale_orig = cv2.resize(rgb, (int(w*args.scale), int(h*args.scale)))
scale = scale_orig.copy()
gscale = cv2.cvtColor(scale, cv2.COLOR_BGR2GRAY)
# def gen_classifier(lbp, index, r1, r2, c1, c2):
# lbp_region = lbp[r1:r2,c1:c2]
# (hist, _) = np.histogram(lbp_region.ravel(),
# bins=np.arange(0, numPoints + 3),
# range=(0, numPoints + 2))
# if texture_and_color:
# index_region = index[r1:r2,c1:c2]
# (index_hist, _) = np.histogram(index_region.ravel(),
# bins=64,
# range=(0, 255))
# #index_hist[0] = 0
# hist = np.concatenate((hist, index_hist), axis=None)
# if False:
# # dist histogram
# plt.figure()
# y_pos = np.arange(len(hist))
# plt.bar(y_pos, hist, align='center', alpha=0.5)
# plt.xticks(y_pos, range(len(hist)))
# plt.ylabel('count')
# plt.title('classifier')
# plt.show()
# return hist
def draw(image, r1, r2, c1, c2, color, width):
cv2.rectangle(image,
(int(c1*args.scale), int(r1*args.scale)),
(int((c2)*args.scale)-1, int((r2)*args.scale)-1),
color=color, thickness=width)
def draw_prediction(image, cell_list, selected_cell, show_grid, alpha=0.25):
#colors_hex = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
# '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
colors_hex = ['#2ca02c', '#ff6f0e', '#9467bd', '#1f77b4', '#d62728',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
colors = []
for c in colors_hex:
r = int(c[1:3], 16)
g = int(c[3:5], 16)
b = int(c[5:7], 16)
colors.append( (r, g, b) )
overlay = image.copy()
for key in cell_list:
cell = cell_list[key]
(r1, r2, c1, c2) = cell["region"]
if show_grid == "user" and cell["user"] != None:
index = ord(cell["user"]) - ord('0')
color = colors[index]
draw(overlay, r1, r2, c1, c2, color, cv2.FILLED)
elif show_grid == "prediction" and cell["prediction"] != None:
index = ord(cell["prediction"][0]) - ord('0')
if index > 0 and abs(cell["score"][0]) > 0.25:
color = colors[index]
draw(overlay, r1, r2, c1, c2, color, cv2.FILLED)
result = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)
if show_grid == "prediction":
overlay = result.copy()
for key in cell_list:
cell = cell_list[key]
(r1, r2, c1, c2) = cell["region"]
if cell["user"] != None:
color = colors[ord(cell["user"]) - ord('0')]
draw(overlay, r1, r2, c1, c2, color, 2)
result = cv2.addWeighted(overlay, alpha, result, 1 - alpha, 0)
if selected_cell != None:
(r1, r2, c1, c2) = cell_list[selected_cell]["region"]
draw(result, r1, r2, c1, c2, (255,255,255), 2)
return result
selected_cell = None
show_grid = "user"
win = 'scale'
scale = draw_prediction(scale_orig, tmodel.cells, selected_cell, show_grid)
cv2.imshow(win, scale)
def onmouse(event, x, y, flags, param):
global selected_cell
if event == cv2.EVENT_LBUTTONDOWN:
# show region detail
key = tmodel.find_key(int(x/args.scale), int(y/args.scale))
selected_cell = key
(r1, r2, c1, c2) = tmodel.cells[key]["region"]
rgb_region = rgb[r1:r2,c1:c2]
cv2.imshow('region', cv2.resize(rgb_region, ( (r2-r1)*3, (c2-c1)*3) ))
scale = draw_prediction(scale_orig, tmodel.cells, selected_cell, show_grid)
cv2.imshow(win, scale)
elif event == cv2.EVENT_RBUTTONDOWN:
key = tmodel.find_key(int(x/args.scale), int(y/args.scale))
#if cell_list[key]["user"] == None:
# cell_list[key]["user"] = "yes"
#elif cell_list[key]["user"] == "yes":
# cell_list[key]["user"] = "no"
#else:
# cell_list[key]["user"] = None
scale = draw_prediction(scale_orig, tmodel.cells, selected_cell, show_grid)
cv2.imshow(win, scale)
cv2.setMouseCallback(win, onmouse)
# work list
work_list = list(tmodel.cells.keys())
random.shuffle(work_list)
index = 0
while index < len(work_list):
key = work_list[index]
selected_cell = key
scale = draw_prediction(scale_orig, tmodel.cells, selected_cell, show_grid)
(r1, r2, c1, c2) = tmodel.cells[key]["region"]
print(r1, r2, c1, c2)
rgb_region = rgb[r1:r2,c1:c2]
cv2.imshow('gray', gscale)
cv2.imshow('scale', scale)
cv2.imshow('region', cv2.resize(rgb_region, ( (r2-r1)*3, (c2-c1)*3) ))
keyb = cv2.waitKey()
if keyb >= ord('0') and keyb <= ord('9'):
tmodel.cells[selected_cell]["user"] = chr(keyb)
if key == selected_cell:
index += 1
elif keyb == ord(' '):
# pass this cell
index += 1
elif keyb == ord('g'):
if show_grid == "user":
show_grid = "prediction"
elif show_grid == "prediction":
show_grid = "none"
elif show_grid == "none":
show_grid = "user"
elif keyb == ord('f'):
tmodel.update_model()
tmodel.update_prediction()
elif keyb == ord('q'):
quit()
# if False:
# # dist histogram
# plt.figure()
# y_pos = np.arange(len(hist))
# plt.bar(y_pos, hist, align='center', alpha=0.5)
# plt.xticks(y_pos, range(len(hist)))
# plt.ylabel('count')
# plt.title('total distance histogram')
# plt.show()
| mit |
ocelot-collab/ocelot | demos/ebeam/linac_orbit_correction.py | 1 | 2262 | """
Linac Orbit Correction.
S.Tomin. 09.2019
"""
from ocelot import *
from ocelot.gui.accelerator import *
import dogleg_lattice as dl
from ocelot.cpbd.orbit_correction import *
from ocelot.cpbd.response_matrix import *
import seaborn as sns
import logging
logging.basicConfig(level=logging.INFO)
method = MethodTM()
method.global_method = SecondTM
# introduce misalignment
dl.qi_77_i1.dx = -100e-6
dl.qi_77_i1.dy = 100e-6
dl.qi_85_i1.dx = 100e-6
dl.qi_85_i1.dy = -100e-6
lat = MagneticLattice(dl.cell, method=method)
tws = twiss(lat, tws0=dl.tws0)
plot_opt_func(lat, tws, top_plot=["Dy"])
plt.show()
orb = Orbit(lat)
method = LinacRmatrixRM(lattice=orb.lat, hcors=orb.hcors, vcors=orb.vcors, bpms=orb.bpms)
#drm_method = LinacDisperseSimRM
orb.response_matrix = ResponseMatrix(method=method)
# in that case the initial Twiss is needed only for initial energy
orb.response_matrix.calculate(tw_init=dl.tws0)
ax = sns.heatmap(orb.response_matrix.df, annot=True)
ax.set_title("Orbit response matrix")
plt.show()
s_bpm_b = np.array([p.s for p in orb.bpms])
x_bpm_b, y_bpm_b = method.read_virtual_orbit(p_init=Particle())
fig, ax = plot_API(lat)
ax.plot(s_bpm_b, x_bpm_b*1000., "ro-", label="X [mm]")
ax.plot(s_bpm_b, y_bpm_b*1000., "bo-", label="Y [mm]")
ax.legend()
plt.show()
orb.correction(beta=500)
x_bpm, y_bpm = method.read_virtual_orbit(p_init=Particle())
p_list = lattice_track(lat, method.particle0)
s = [p.s for p in p_list]
x = [p.x*1000. for p in p_list]
y = [p.y*1000. for p in p_list]
fig, ax = plot_API(lat)
ax.plot(s_bpm_b, x_bpm*1000., "ro" , label="X [mm]")
ax.plot(s, x, 'r')
ax.plot(s_bpm_b, y_bpm*1000., "bo", label="Y [mm]")
ax.plot(s, y, 'b')
plt.show()
rm = orb.response_matrix.extract(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'])
print(rm)
rm[0, 0] = 2
rm[2, 0] = 0.1
rm[1, 1] = -0.1
print(rm)
orb.response_matrix.inject(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'], inj_matrix=rm)
rm_check = orb.response_matrix.extract(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'])
print("is RMs equal: ", (np.equal(rm, rm_check)).all())
ax = sns.heatmap(orb.response_matrix.df, annot=True)
ax.set_title("Orbit response matrix")
plt.show()
| gpl-3.0 |
iABC2XYZ/abc | Scripts/RFQVane/VaneStructure3.py | 1 | 7860 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 03 11:11:22 2017
@author: A
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import fsolve
from scipy.special import iv
#from scipy.signal import find_peaks_cwt
plt.close('all')
zStart=1.55
zEnd=230
zStep=0.005  # step size: do not go below 0.005, otherwise the Bessel-function solve fails
Freq=162.5
cLight=299792458
lambda_m=cLight/Freq/1.e6
cell_Beta_A_a_m_Z_L=np.loadtxt('V.txt')
cell=cell_Beta_A_a_m_Z_L[:,0]
Beta=cell_Beta_A_a_m_Z_L[:,1]
A=cell_Beta_A_a_m_Z_L[:,2]
a=cell_Beta_A_a_m_Z_L[:,3]
m=cell_Beta_A_a_m_Z_L[:,4]
Z=cell_Beta_A_a_m_Z_L[:,5]######################################end of the cell#####################
L=cell_Beta_A_a_m_Z_L[:,6]
V4_cell_z_a_m_r0=np.loadtxt('RFQ.V4.txt')
cell_V4=V4_cell_z_a_m_r0[:,0]############################################start at cell4#############
z_V4=np.array(V4_cell_z_a_m_r0[:,1])+1.52919########################start of the cell################
a_V4=V4_cell_z_a_m_r0[:,2]
r0_V4=V4_cell_z_a_m_r0[:,3]
L_V4=z_V4[1:]-z_V4[:-1]  # the cell-length array has one entry fewer than the other parameters
numCell=len(cell)
nREC=int((zEnd-zStart)/zStep)+2
xREC=np.zeros((nREC,2))
xREC_2=np.zeros((nREC,2))
zREC=np.zeros(nREC)
cellREC=np.zeros(nREC)
cellFlagREC=np.zeros(nREC)
RhoREC=np.zeros(nREC)
LREC=np.zeros(nREC)
Lcal=np.zeros(nREC)
iCellFlag=1
zRec=zStart
def RFQVane(x,a,k,z,m):  # define the RFQ vane-tip profile function
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
return x**2/a**2-(1-A*iv(0,k*x)*np.cos(k*z))/(1-A*iv(0,k*a))
def RFQVane_V4(x,a,z,L,r0):
k=np.pi/L
chi=(a/r0)**2
A=(1-chi)/iv(0,k*a)
return x**2/a**2-(1-A*iv(0,k*x)*np.cos(k*z))/chi
def Rho(a,k,m):
A=(m**2-1)/(m**2*iv(0,k*a)+iv(0,m*k*a))
Rho=0.75*a/np.sqrt(1-A*iv(0,k*a))
return Rho
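# Quick illustrative sketch (added, not part of the original script): evaluate the
# vane profile and transverse radius for one made-up set of cell parameters.
# The numbers below are assumptions for demonstration, not values from V.txt.
def _vane_profile_sketch():
    aEx, mEx, LEx, zEx = 0.3, 1.5, 1.0, 0.2  # aperture, modulation, cell length, axial position (cm)
    kEx = np.pi / LEx
    xEx = fsolve(RFQVane, [-0.3], args=(aEx, kEx, zEx, mEx))
    return xEx, Rho(aEx, kEx, mEx)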
iREC=0;
while (zRec<zEnd):
print(zRec)
diff_RecCell=zRec-Z
    iCell=len(diff_RecCell[diff_RecCell>0]) -1  # determine which cell the current point falls in
if (iCell<4):
iCellFlag=(-1)**iCell
if (iCellFlag>0):
zCal=zRec-Z[iCell]
zCal_2=Z[iCell]-zRec
else:
zCal=Z[iCell+1]-zRec
zCal_2=zRec-Z[iCell-1]
#zCal=zRec-Z[iCell]
#k=np.pi/L[iCell]
betaK=np.interp(zRec,Z,Beta)
k=np.pi/betaK/lambda_m/100*2
        #k=np.pi/np.interp(zRec,Z,L)  # computing the cell length from the L data gives the same result as computing it from beta
aInterP=np.interp(zRec,Z,a)
mInterP=np.interp(zRec,Z,m)
xRecTmp = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal,mInterP))
xRecTmp_2 = fsolve(RFQVane,[-0.3],args=(aInterP,k,zCal_2,mInterP))
else:
aInterP=np.interp(zRec,Z,a)
mInterP=np.interp(zRec,Z,m)
betaK=np.interp(zRec,Z,Beta)
k=np.pi/betaK/lambda_m/100*2
iCellFlag=(-1)**iCell
if (iCellFlag>0):
zCal=zRec-z_V4[iCell-4]
else:
zCal=z_V4[iCell-3]-zRec
aInterP_V4=np.interp(zRec,z_V4,a_V4)
LInterP_V4=np.interp(zRec,z_V4[:-1],L_V4)
r0InterP_V4=np.interp(zRec,z_V4,r0_V4)
xRecTmp = fsolve(RFQVane_V4,[-0.6],args=(aInterP_V4,zCal,LInterP_V4,r0InterP_V4))
iREC+=1
zRec+=zStep
xREC[iREC,:]=xRecTmp
#xREC_2[iREC,:]=xRecTmp_2
zREC[iREC]=zRec
cellREC[iREC]=iCell
cellFlagREC[iREC]=iCellFlag
LREC[iREC]=np.interp(zRec,Z,L)
Lcal[iREC]=betaK*lambda_m/2*100
RhoREC[iREC]=Rho(aInterP,k,mInterP)
plt.figure('calculating result')
plt.plot(zREC,xREC[:,0],'b')
plt.hold
#plt.plot(zREC,xREC_2[:,0],'r')
######################################comparison with reference data####################################
z_HV_REF=np.loadtxt('RFQ H DATA.txt')
Z_REF=z_HV_REF[:,0]/10.
X_REF=z_HV_REF[:,1]/10
Rho_REF=z_HV_REF[:,2]/10
plt.figure('Comp')
plt.plot(zREC,xREC,'b')
plt.hold
#plt.plot(zREC,xREC_2[:,0],'g')
plt.hold
plt.plot(Z_REF,X_REF,'r')
xRECInterP=np.interp(Z_REF,zREC,xREC[:,0])
plt.figure('Diff')
plt.plot(Z_REF,X_REF-xRECInterP,'r')
plt.hold
plt.plot(zREC,cellFlagREC,'g')
########################compare the Rho function##################################################
plt.figure('Rho')
'''
plt.plot(zREC,RhoREC,'b')
plt.hold
plt.plot(Z_REF,Rho_REF,'r')
plt.hold
plt.plot(Z_REF,Rho_REF-np.interp(Z_REF,zREC,RhoREC),'g')
'''
plt.plot(zREC,np.interp(zREC,Z_REF,Rho_REF),'g')
###########################compare cell length: read from data vs. computed################################
'''
plt.figure('L_COMP')
plt.plot(zREC,LREC,'r')
plt.hold
plt.plot(zREC,Lcal,'b')
plt.hold
plt.figure('L_Ratio')
plt.plot(zREC,((LREC-Lcal)/LREC))
'''
########################analyze the number of cells################################################
def Smooth(x):
x[0]=x[0]
x[1]=np.average(x[0:2])
x[2:-3]=(x[0:-5]+x[1:-4]+x[2:-3]+x[3:-2]+x[4:-1])/5.
x[-2]=np.average(x[-3:-1])
x[-1]=x[-1]
return x
def FindPeaks(x):
xLeft=x[1:-2]> x[0:-3]
xRight=x[1:-2]> x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
def FindValley(x):
xLeft=x[1:-2]< x[0:-3]
xRight=x[1:-2]< x[2:-1]
xFlag=xLeft*xRight
indexX=np.where(xFlag==1)
return indexX
indexPeak=((Z_REF>4.) * (Z_REF<221.5))  # define the peak-search range
ZREFPeak=Z_REF[indexPeak]
xREFPeak=X_REF[indexPeak]
xREFPeak=Smooth(xREFPeak)
xREFPeak=Smooth(xREFPeak)
xRECPeak=xRECInterP[indexPeak]
ZRECPeak=ZREFPeak
xRECPeak=Smooth(xRECPeak)
xRECPeak=Smooth(xRECPeak)
index_xRECPeakTuple=FindPeaks(xRECPeak)
index_xREFPeakTuple=FindPeaks(xREFPeak)
index_xRECPeak=index_xRECPeakTuple[0]
index_xREFPeak=index_xREFPeakTuple[0]
print(' xRECPeak:',len(index_xRECPeak),'\n','xREFPeak:',len(index_xREFPeak))
index_xREFValleyTuple=FindValley(xREFPeak)
index_xREFValley=index_xREFValleyTuple[0]
if len(index_xREFPeak)==len(index_xREFValley):
if ((Z_REF[index_xREFPeak[0]])<(Z_REF[index_xREFValley[0]])):
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak]
P_cell_PV=Z_REF[index_xREFValley]
else:
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley]
P_cell_PV=Z_REF[index_xREFPeak]
elif len(index_xREFPeak)<len(index_xREFValley):
Lcell_HV=Z_REF[index_xREFPeak]-Z_REF[index_xREFValley[:-1]]
P_cell_PV=Z_REF[index_xREFPeak]
else:
Lcell_HV=Z_REF[index_xREFValley]-Z_REF[index_xREFPeak[-1]]
P_cell_PV=Z_REF[index_xREFValley]
pariout=np.loadtxt('pariout_python.txt')
Cell_pariout=pariout[:,0]
Z_pariout=pariout[:,-3]
L_pariout=pariout[:,-4]
r0_pariout=pariout[:,9]
ncell_pariout=len(Z_pariout[(Z_pariout>4.)*(Z_pariout<221.5)])
plt.figure('Length(HV_P-V)_comp_priout')
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],P_cell_PV,Lcell_HV),'b')
plt.hold
plt.plot(Z_REF[indexPeak],np.interp(Z_REF[indexPeak],Z_pariout,L_pariout),'r')
print(' HV:',((len(index_xREFPeak))+len(index_xREFValley)),'\n','parioutcell:',ncell_pariout)
'''
plt.figure('Peak')
plt.plot(ZRECPeak,xRECPeak,'b')
plt.hold
plt.plot(ZRECPeak,xREFPeak,'r')
plt.plot(ZRECPeak[index_xRECPeak],xRECPeak[index_xRECPeak],'bo')
plt.plot(ZRECPeak[index_xREFPeak],xREFPeak[index_xREFPeak],'r*')
plt.plot(ZRECPeak[index_xREFValley],xREFPeak[index_xREFValley],'r*')
'''
##############################compute the constant vane-tip radius######################################
r0_cal_rho=r0_pariout[4:]
L_cal_rho=L_pariout[4:]
r0_sum=0
for i in range(0,len(L_cal_rho)):
r0_sum=r0_sum+r0_cal_rho[i]*L_cal_rho[i]
r0_rho=r0_sum/Z_pariout[-1]
rho_constant=0.75*r0_rho
print(' CST_RHO_constant=',rho_constant,'cm')
##############################################################################
plt.show()
| gpl-3.0 |
automl/paramsklearn | ParamSklearn/components/classification/extra_trees.py | 1 | 6420 | import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter, Constant
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.constants import *
from sklearn.ensemble import ExtraTreesClassifier as ETC
class ExtraTreesClassifier(ParamSklearnClassificationAlgorithm):
def __init__(self, n_estimators, criterion, min_samples_leaf,
min_samples_split, max_features, max_leaf_nodes_or_max_depth="max_depth",
bootstrap=False, max_leaf_nodes=None, max_depth="None",
min_weight_fraction_leaf=0.0,
oob_score=False, n_jobs=1, random_state=None, verbose=0,
class_weight=None):
self.n_estimators = int(n_estimators)
self.estimator_increment = 10
if criterion not in ("gini", "entropy"):
raise ValueError("'criterion' is not in ('gini', 'entropy'): "
"%s" % criterion)
self.criterion = criterion
if max_leaf_nodes_or_max_depth == "max_depth":
self.max_leaf_nodes = None
if max_depth == "None":
self.max_depth = None
else:
self.max_depth = int(max_depth)
#if use_max_depth == "True":
# self.max_depth = int(max_depth)
#elif use_max_depth == "False":
# self.max_depth = None
else:
if max_leaf_nodes == "None":
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(max_leaf_nodes)
self.max_depth = None
self.min_samples_leaf = int(min_samples_leaf)
self.min_samples_split = int(min_samples_split)
self.max_features = float(max_features)
if bootstrap == "True":
self.bootstrap = True
elif bootstrap == "False":
self.bootstrap = False
self.oob_score = oob_score
self.n_jobs = int(n_jobs)
self.random_state = random_state
self.verbose = int(verbose)
self.class_weight = class_weight
self.estimator = None
def fit(self, X, y, sample_weight=None, refit=False):
if self.estimator is None or refit:
self.iterative_fit(X, y, n_iter=1, sample_weight=sample_weight,
refit=refit)
while not self.configuration_fully_fitted():
self.iterative_fit(X, y, n_iter=1, sample_weight=sample_weight)
return self
def iterative_fit(self, X, y, sample_weight=None, n_iter=1, refit=False):
if refit:
self.estimator = None
if self.estimator is None:
num_features = X.shape[1]
max_features = int(
float(self.max_features) * (np.log(num_features) + 1))
# Use at most half of the features
max_features = max(1, min(int(X.shape[1] / 2), max_features))
self.estimator = ETC(
n_estimators=0, criterion=self.criterion,
max_depth=self.max_depth, min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf, bootstrap=self.bootstrap,
max_features=max_features, max_leaf_nodes=self.max_leaf_nodes,
oob_score=self.oob_score, n_jobs=self.n_jobs, verbose=self.verbose,
random_state=self.random_state,
class_weight=self.class_weight,
warm_start=True
)
tmp = self.estimator # TODO copy ?
tmp.n_estimators += n_iter
tmp.fit(X, y, sample_weight=sample_weight)
self.estimator = tmp
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
return not len(self.estimator.estimators_) < self.n_estimators
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'ET',
'name': 'Extra Trees Classifier',
'handles_missing_values': False,
'handles_nominal_values': False,
'handles_numerical_features': True,
'prefers_data_scaled': False,
                # TODO find out if this is good because of sparsity...
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'handles_sparse': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,),
# TODO find out what is best used here!
# But rather fortran or C-contiguous?
'preferred_dtype': np.float32}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_estimators = cs.add_hyperparameter(Constant("n_estimators", 100))
criterion = cs.add_hyperparameter(CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default="gini"))
max_features = cs.add_hyperparameter(UniformFloatHyperparameter(
"max_features", 0.5, 5, default=1))
max_depth = cs.add_hyperparameter(
UnParametrizedHyperparameter(name="max_depth", value="None"))
min_samples_split = cs.add_hyperparameter(UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default=2))
min_samples_leaf = cs.add_hyperparameter(UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default=1))
min_weight_fraction_leaf = cs.add_hyperparameter(Constant(
'min_weight_fraction_leaf', 0.))
bootstrap = cs.add_hyperparameter(CategoricalHyperparameter(
"bootstrap", ["True", "False"], default="False"))
return cs
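# Illustrative usage sketch (added, not part of the original module): build the
# search space and instantiate the classifier with hand-picked values; the
# numbers are only examples, not recommended settings.
def _extra_trees_usage_sketch():
    cs = ExtraTreesClassifier.get_hyperparameter_search_space()
    clf = ExtraTreesClassifier(n_estimators=100, criterion="gini",
                               min_samples_leaf=1, min_samples_split=2,
                               max_features=1.0)
    return cs, clf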
| bsd-3-clause |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/bezier.py | 10 | 15695 | """
A module providing some utility functions regarding bezier path manipulation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.path import Path
from operator import xor
import warnings
class NonIntersectingPathException(ValueError):
pass
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a * d - b * c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_ * line1_rhs + b_ * line2_rhs
y = c_ * line1_rhs + d_ * line2_rhs
return x, y
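# Small sanity-check sketch (added, not in the original module): intersect the
# x-axis (a line through the origin with angle 0) with the vertical line
# through (1, 0); the result is (1, 0) up to floating point noise.
def _get_intersection_example():
    return get_intersection(0., 0., np.cos(0.), np.sin(0.),
                            1., 0., np.cos(np.pi / 2), np.sin(np.pi / 2))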
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
locations of the two points located along its perpendicular line at the
distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1 - t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
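# Worked micro-example (added for illustration, not in the original module):
# splitting the quadratic control polygon [(0, 0), (1, 2), (2, 0)] at t=0.5.
def _split_de_casteljau_example():
    left, right = split_de_casteljau([(0., 0.), (1., 2.), (2., 0.)], 0.5)
    # left  -> control points [(0, 0), (0.5, 1), (1, 1)]
    # right -> control points [(1, 1), (1.5, 1), (2, 0)]
    return left, right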
# FIXME spelling mistake in the name of the parameter ``tolerence``
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise NonIntersectingPathException(
"the segment does not seem to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0] - end[0]) ** 2 + \
(start[1] - end[1]) ** 2 < tolerence ** 2:
return t0, t1
# calculate the middle point
middle_t = 0.5 * (t0 + t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
class BezierSegment(object):
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier lines can be supported by simply adding
# corresponding values.
_binom_coeff = {1: np.array([1., 1.]),
2: np.array([1., 2., 1.]),
3: np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <=
n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:, 0]
yy = _control_points[:, 1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1. - t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
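# Tiny illustration (added, not original): the midpoint of the quadratic
# segment with control points (0, 0), (1, 2), (2, 0) evaluates to (1, 1).
def _bezier_segment_example():
    bz = BezierSegment([(0., 0.), (1., 2.), (2., 0.)])
    return bz.point_at_t(0.5)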
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside
the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
return _left, _right
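# Sketch (added for illustration): split a flat quadratic segment where it
# leaves the unit circle centered at the origin. inside_circle is defined
# further down in this module and is available by the time this is called.
def _split_bezier_at_circle_example():
    bezier = [(0., 0.), (1., 0.), (2., 0.)]
    inside = inside_circle(0., 0., 1.)
    return split_bezier_intersecting_with_closedpath(bezier, inside)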
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
    which it intersects with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t * r + cx, sin_t * r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax,
tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = next(path_iter)
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold = 0
i = 1
for ctl_points, command in path_iter:
iold = i
i += len(ctl_points) // 2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = list(zip(bezier_path[::2], bezier_path[1::2]))
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r ** 2
def _f(xy):
x, y = xy
return (x - cx) ** 2 + (y - cy) ** 2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1 - x0, y1 - y0
d = (dx * dx + dy * dy) ** .5
return dx / d, dy / d
def check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):
""" returns
    * 1 if two lines are parallel in the same direction
    * -1 if two lines are parallel in the opposite direction
* 0 otherwise
"""
theta1 = np.arctan2(dx1, dy1)
theta2 = np.arctan2(dx2, dy2)
dtheta = np.abs(theta1 - theta2)
if dtheta < tolerence:
return 1
elif np.abs(dtheta - np.pi) < tolerence:
return -1
else:
return False
def get_parallels(bezier2, width):
"""
Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    # c1 and c2 are control points representing the begin and end of the
# bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
cmx - c2x, cmy - c2y)
if parallel_test == -1:
warnings.warn(
"Lines do not intersect. A straight line is used instead.")
#cmx, cmy = 0.5*(c1x+c2x), 0.5*(c1y+c2y)
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
cos_t2, sin_t2 = cos_t1, sin_t1
else:
        # t1 and t2 are the angles between c1 and cm, and between cm and c2.
        # They are also the angles of the tangent lines of the path at c1 and c2.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
if parallel_test != 0:
        # a special case for a straight line, i.e., the angle between the two
        # lines is smaller than some (arbitrary) value.
cmx_left, cmy_left = \
0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
cmx_right, cmy_right = \
0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
else:
cmx_left, cmy_left = \
get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = \
get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left),
(cmx_left, cmy_left),
(c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right),
(cmx_right, cmy_right),
(c2x_right, c2y_right)]
return path_left, path_right
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parametric value 0, 0.5, and 1.
"""
cmx = .5 * (4 * mmx - (c1x + c2x))
cmy = .5 * (4 * mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Similar to get_parallels, returns control points of two quadratic
    bezier lines having a width roughly parallel to the given one, separated by
*width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c3.
    # They are also the angles of the tangent lines of the path at c1 and c3.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
# c12-c23
c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
def make_path_regular(p):
"""
fill in the codes if None.
"""
c = p.codes
if c is None:
c = np.empty(p.vertices.shape[:1], "i")
c.fill(Path.LINETO)
c[0] = Path.MOVETO
return Path(p.vertices, c)
else:
return p
def concatenate_paths(paths):
"""
concatenate list of paths into a single path.
"""
vertices = []
codes = []
for p in paths:
p = make_path_regular(p)
vertices.append(p.vertices)
codes.append(p.codes)
_path = Path(np.concatenate(vertices),
np.concatenate(codes))
return _path
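# Minimal usage sketch (added, not part of the original module): concatenate
# two 2-point line paths into a single 4-vertex path.
def _concatenate_paths_example():
    p1 = Path([(0., 0.), (1., 0.)])
    p2 = Path([(1., 1.), (2., 1.)])
    return concatenate_paths([p1, p2])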
| mit |
Obus/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
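# Illustrative call (actual values depend on the stream's current position):
#   X_text, y = get_minibatch(data_stream, 1000)
#   X_text -> tuple of raw "title\n\nbody" strings
#   y      -> numpy array of 0/1 flags, 1 where 'acq' is among the doc topics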
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
# Discard test set
get_minibatch(data_stream, n_test_documents)
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Build the iterator over mini-batches of documents drawn from the parsed
# Reuters stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
leiran/aermodpy | aermod.py | 1 | 32889 | #!/usr/bin/env python
"""Python interface to AERMOD modeling system files.
design notes:
+ Bug on POST processing; only processes 996 hours
- proposed fix: in-place averaging, discard hourly data
developed for python 3.x
"""
# docstring metadata
__author__ = "Leiran Biton"
__copyright__ = "Copyright 2015"
__credits__ = []
__license__ = "GPL"
__version__ = "0.11"
__maintainer__ = "Leiran Biton"
__email__ = "[email protected]"
__status__ = "Production"
# standard library imports
import os.path
import datetime
import numpy
import csv
# internal package imports
from aermodpy.support import pollutant_dict, vars_indices, ordinal
class point(object):
def __init__(self, num, **kwargs):
"""Point object
mandatory arguments:
num - number of points
optional arguments:
Xs - array of x locations for # of points. default = zeros
Ys - array of y locations for # of points. default = zeros
Zs - array of z locations for # of points. default = zeros
XYs - array shape=(num, 2) of x and y locations for # of points. replaces Xs & Ys.
XYZs - array shape=(num, 3) of x, y, and z locations for # of points. replaces Xs, Ys, and Zs.
"""
self.num = num
self.X = kwargs.get("Xs", numpy.zeros(num))
self.Y = kwargs.get("Ys", numpy.zeros(num))
self.Z = kwargs.get("Zs", numpy.zeros(num))
if "XYs" in kwargs:
self.X = kwargs["XYs"][:,0]
self.Y = kwargs["XYs"][:,1]
if "XYZs" in kwargs:
self.X = kwargs["XYZs"][:,0]
self.Y = kwargs["XYZs"][:,1]
self.Z = kwargs["XYZs"][:,2]
class post:
"POST file processor"
verbose = False
DEBUG = False
# default data
formatstring = "(3(1X,F13.5),3(1X,F8.2),3X,A5,2X,A8,2X,A4,6X,A8,2X,I8)"
vars_index = None
def __init__(self
,filename
,directory="."
,receptors=0
,formatstring_override=False
,century=20
,vars_index=vars_indices["post"]
,verbose=True
,DEBUG=False
):
self.POSTfile = self.openfile(filename, directory=directory, mode="rU")
self.century = century
self.datetimes = [] # empty list for datetime objects
self.modeldoc = []
self.datatypes = []
self.POSTdata = {}
self.receptors = point(receptors)
self.formatstring_override = formatstring_override
self.vars_index = vars_index
self.verbose = verbose
self.DEBUG = DEBUG
def decode_format_datastring(self
,formatstring):
"""placeholder function for decoding a string describing POST file dataformat"""
# example format '* FORMAT: (3(1X,F13.5),3(1X,F8.2),3X,A5,2X,A8,2X,A4,6X,A8,2X,I8)'
return formatstring
def decode_data(self
,dataline
#,formatstring=formatstring
):
# example format '* FORMAT: (3(1X,F13.5),3(1X,F8.2),3X,A5,2X,A8,2X,A4,6X,A8,2X,I8)'
# example head '* X Y AVERAGE CONC ZELEV ZHILL ZFLAG AVE GRP HIVAL NET ID DATE(CONC)\n
# example data ' 569830.00000 4909393.00000 3065.99300 494.10 747.20 0.00 1-HR ALL 08033104\n'
#if self.formatstring_override:
# self.decode_format_datastring(self, formatstring)
if all([datetime_part in self.vars_index for datetime_part in ("year","month","day","hour")]):
dt = datetime.datetime(self.vars_index["year"]["type"](dataline[self.vars_index["year"]["start"]:
self.vars_index["year"]["end" ]]) + self.century*100
,self.vars_index["month"]["type"](dataline[self.vars_index["month"]["start"]:
self.vars_index["month"]["end" ]])
,self.vars_index["day"]["type"](dataline[self.vars_index["day"]["start"]:
self.vars_index["day"]["end" ]])
,self.vars_index["hour"]["type"](dataline[self.vars_index["hour"]["start"]:
self.vars_index["hour"]["end" ]]) - 1
)
else:
dt = None
return [self.vars_index[var]["type"](dataline[self.vars_index[var]["start"]:self.vars_index[var]["end"]])
for var in ["x", "y", "zflag","conc"]
], dt
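    # decode_data slices each fixed-width record according to the start/end/type
    # entries in self.vars_index and returns ([x, y, zflag, conc], dt), where dt
    # is a datetime built from the record's date fields (hour shifted by -1 so
    # hours run 0-23), or None when no date fields are configured.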
def add_buildings(self
,filename
,directory="."
,nosources=False
):
self.building_vertices = {}
self.sources = {}
if self.verbose: print("--> opening building data file")
self.building_file = self.openfile(filename, directory, "rU")
# throw away header data
[next(self.building_file) for header in range(2)]
units, unit_value = next(self.building_file).split()
if self.DEBUG: print("DEBUG: units / unit_value:", units, unit_value)
utmy, trash = next(self.building_file).split()
num_bldgs = int(next(self.building_file))
if self.DEBUG: print("DEBUG: number of buildings:", num_bldgs)
for building in range(num_bldgs):
try:
name, stories, elev = self.building_header()
self.process_building(name, stories, elev)
except:
raise Exception("No more buildings to process")
if not nosources:
num_srcs = int(next(self.building_file))
for src in range(num_srcs):
try:
raw_source_line = next(self.building_file).strip()
source_descriptors = raw_source_line.replace("'", "").split(sep=None, maxsplit=5)
if len(source_descriptors) == 5:
name, elev, height, x, y = source_descriptors
else:
trash, elev, height, x, y, name = source_descriptors
name = name.strip()
if self.DEBUG: print("DEBUG: source name:", name, x, y)
self.sources[(name)] = \
point(1, Xs=numpy.array(float(x))
, Ys=numpy.array(float(y))
)
if self.verbose: print("adding source:", self.sources[(name)].X, self.sources[(name)].Y)
except:
raise Exception("No more sources to process")
def building_header(self):
"""get building data for new building"""
building_descriptors = next(self.building_file).split(sep=None, maxsplit=3)
if len(building_descriptors) == 3:
name_padded, stories, base_elevation = building_descriptors
else:
trash, stories, base_elevation, name_padded = building_descriptors
if self.verbose: print("adding building: ", name_padded.strip(), stories, base_elevation)
return name_padded.strip().replace("'",""), int(stories), float(base_elevation)
def process_building(self
,name
,stories
,base_elevation
):
"""adds building data to the self.building_vertices dictionary"""
for story in range(stories):
self.process_building_story(name, story+1)
def process_building_story(self
,name
,story
):
"""process a building story"""
vertices, height = next(self.building_file).split()
vertices = int(vertices)
height = float(height)
vs = numpy.array([(float(X), float(Y)) for (X, Y) in \
[next(self.building_file).split() for v in range(vertices)] \
]).reshape(vertices, 2)
self.building_vertices[(name, story)] = point(vertices, XYs=vs)
def openfile(self
,filename
,directory="."
,mode="rU"
):
# files
try:
filepath = directory + os.path.sep + filename
except TypeError:
raise TypeError("Invalid 'directory' or 'filename' inputs!")
if self.verbose: print("Opening file:", filepath)
if self.verbose: print(" mode =", mode)
try: openfile = open(filepath, mode)
except:
raise IOError("Filepath '%s' failed to open. Check the address and mode." % filepath)
return openfile
def getPOSTfileMetaData(self):
"""Get metadata from POSTfile"""
try:
[filetype_doc
,optionsflag_doc
,modeloptions_doc
,datatype_doc
,receptors_doc
,dataformat_doc
] = [next(self.POSTfile) for i in range(6)]
except:
raise Exception("POST file does not contain proper header metadata")
# extract format string from data format documentation
if self.DEBUG: print("DEBUG: filetype_doc =", filetype_doc)
if self.DEBUG: print("DEBUG: dataformat_doc =", dataformat_doc)
dataformat_string = dataformat_doc[dataformat_doc.index(":")+1:].strip()
# decode data format string
dataformat = self.decode_format_datastring(dataformat_string)
if self.formatstring_override:
# function still in development
self.formatstring = dataformat
else:
self.modeldoc.append((filetype_doc
,optionsflag_doc
,modeloptions_doc
,datatype_doc
,receptors_doc
,dataformat_doc
))
datatype_metadata = datatype_doc.split()
r_type = datatype_metadata[datatype_metadata.index("VALUES")-1]
r_form = datatype_metadata[datatype_metadata.index("OF")+1:\
datatype_metadata.index("VALUES")-1]
r_form = " ".join(r_form)
source_group = datatype_metadata[-1]
if self.DEBUG: print("DEBUG:", r_type, r_form, source_group)
self.datatypes.append((r_type, r_form, source_group))
self.POSTdata[(r_type, r_form, source_group)] = numpy.zeros([self.receptors.num, 1])
if len(self.modeldoc) == 1:
n_receptors = [int(s) for s in receptors_doc.split() if s.isdigit()][0]
self.receptors = point(n_receptors)
def getPOSTfileHeader(self):
"""Get metadata from POSTfile"""
self.fileheader = next(self.POSTfile).strip()
next(self.POSTfile) # -------- line
    def printResults(self, filename, r_type, r_form, source_group, **kwargs):
        """Write the (r_type, r_form, source_group) results array to outfile as comma-separated values."""
        outfile = self.openfile(filename, directory=kwargs.get("directory", "."), mode="w")
        self.POSTdata[(r_type, r_form, source_group)].tofile(outfile, sep=",")
        outfile.close()
    def scalePOSTdata(self, r_type, r_form, source_group, **kwargs):
        """Scale the (r_type, r_form, source_group) results by the optional "scalar" keyword argument. If omitted, 1.0."""
        if self.DEBUG: print("DEBUG: scaling %s results by" % r_type, kwargs.get("scalar", 1.0))
        self.POSTdata[(r_type, r_form, source_group)] *= kwargs.get("scalar", 1.0)
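    # Usage sketch for the two helpers above (the key values are illustrative;
    # they must match a (r_type, r_form, source_group) tuple already stored in
    # self.POSTdata, e.g. one of the entries in self.datatypes):
    #   p.scalePOSTdata("CONC", "1-HR", "ALL", scalar=1.0e3)
    #   p.printResults("max_conc.csv", "CONC", "1-HR", "ALL", directory=".")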
def processPOSTData(self
,ranked=1
,annual=False
):
"""Process stored POST file data"""
if self.verbose: print("--> processing open data file")
while True:
try:
self.getPOSTfileMetaData()
self.getPOSTfileHeader()
self.POSTdata[self.datatypes[-1]] = numpy.zeros([self.receptors.num, ranked])
h = 0
if "hour" in self.vars_index:
while True:
try:
self.getPOSTfileData(h=h, annual=annual, ranked=ranked)
h += 1
except Exception as e:
if self.DEBUG:
print("DEBUG: reached exception during file processing")
print("DEBUG:", "Unexpected error:", e)
print(" ", self.datetimes[-1])
return
else:
try:
self.getPOSTfileData(h=h, annual=annual, ranked=ranked)
if self.DEBUG:
print("DEBUG: got 1 instance of POST data")
except:
return
except:
return
def getPOSTfileData(self
,h=0
,annual=False
,ranked=1
):
"""Get data from POSTfile, process for average number of hours"""
if self.verbose: print("--> retrieving data")
if h == 0:
self.POSTdata[self.datatypes[-1]] = numpy.zeros([self.receptors.num, ranked])
if annual:
self.POSTdata[self.datatypes[-1]] = numpy.expand_dims(self.POSTdata[self.datatypes[-1]], axis=2)
for r in range(self.receptors.num):
line = next(self.POSTfile)
# decode data
data4hour, dt = self.decode_data(line)
# build datetime list
if r == 0:
if self.DEBUG: print("DEBUG:", "processing for", dt)
if annual and (h > 0) and (dt.year > self.datetimes[-1].year):
self.POSTdata[self.datatypes[-1]] = numpy.append(self.POSTdata[self.datatypes[-1]]
,numpy.zeros([self.receptors.num, ranked, 1])
,axis=2
)
self.datetimes.append(dt)
# populate receptor location values
if h == 0:
self.receptors.X[r] = data4hour[0]
self.receptors.Y[r] = data4hour[1]
self.receptors.Z[r] = data4hour[2]
if annual:
                receptor_data = numpy.append(self.POSTdata[self.datatypes[-1]][r,:,-1], [data4hour[3]], axis=0)
receptor_data.sort()
self.POSTdata[self.datatypes[-1]][r,:,-1] = receptor_data[::-1][:ranked]
else:
                receptor_data = numpy.append(self.POSTdata[self.datatypes[-1]][r,:], [data4hour[3]], axis=0)
receptor_data.sort()
self.POSTdata[self.datatypes[-1]][r,:] = receptor_data[::-1][:ranked]
return
def draw_building(self
,building
,story
,axis
,origin=point(1,Xs=[0],Ys=[0])
,**kwargs
):
        """Draw one story of a building footprint as a closed patch on the given axis."""
        import matplotlib.patches as patches
        from matplotlib.path import Path
# create polygon using path method
verts = [(x-origin.X, y-origin.Y) \
for (x, y) \
in zip(self.building_vertices[(building, story)].X
,self.building_vertices[(building, story)].Y
)]
verts.append(verts[0]) # add first point to close polygon
codes = [Path.LINETO for coords in verts]
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
path = Path(verts, codes)
patch = patches.PathPatch(path
,facecolor=kwargs.get("color", "white")
,edgecolor='black'
,linewidth=kwargs.get("linewidth", 0.4)
,alpha=kwargs.get("alpha", 1.00)
)
axis.add_patch(patch)
if kwargs.get("building_name", False) and (story == 1):
axis.annotate(str(building)
,xy=((self.building_vertices[(building, story)].X - origin.X).mean()
,(self.building_vertices[(building, story)].Y - origin.Y).mean())
,va="center"
,ha="center"
,color="blue"
,size=kwargs.get("max_textsize", 8)
)
def printdata(self
,r_type
,r_form # datatype key for POSTdata
,source_group
,filename="aermod_results.csv"
,directory="."
,**kwargs
):
with self.openfile(filename, directory, "w") as csvoutfile:
csvoutfile.write(r_type+"\n")
csvoutfile.write(r_form+"\n")
csvoutfile.write(source_group+"\n")
w = csv.writer(csvoutfile)
rank = kwargs.get("ranked_data", 0)
rank_index = 0 if rank == 0 else rank-1
if kwargs.get("exclude_flagpole_receptors", False):
concs = self.POSTdata[(r_type, r_form, source_group)][:,rank_index][self.receptors.Z==0] * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
else:
concs = self.POSTdata[(r_type, r_form, source_group)][:,rank_index] * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
outlist = [kwargs.get("scale_decimals","%0.0f") % concs.max()]
w.writerow(outlist)
def gridplot(self
,r_type
,r_form # datatype key for POSTdata
,source_group
,levels=[0,10,20,30,40,50,60,70,80,90,100,150,200,250,300]
,**kwargs
):
"""creates an individual grid plot based on input concentration array
kwargs:
contours - width of contour lines to draw. no contour lines if omitted
levels - list of levels to be used in the contour plots
receptor_size - an integer for sizing the receptors to be plotted. default = 15. enter 0 to omit.
plot_max - an integer for sizing the datapoint for the max conc in the domain. enter 0 to omit.
distance_from_origin -
receptor_type - receptor representation as matplotlib marker indicators (default=".")
max_plot - if 0, maximum point omitted.
tickinterval - interval of x and y axes
noticks - if True, omit x-y ticks
labelsize - size of colorbar and x-y tick labels
labelgap - gap between ticks and labels
nocolorbar - if True: colorbar omitted
scale_decimals - colorbar number formatting (e.g. "%0.0f")
filename - string for filename. default: "aermod.png"
colorbar_spacing - "uniform" or "proportional" (default = "proportional")
interpolation_method - linear cubic nearest
contour_colors - list of contour colors to use for contours. if omitted, hot colorscale is used.
colorslevels - colors and levels
scalar - multiplier for concentration data
exclude_flagpole_receptors - Default = False, set to True to exclude flagpole receptors
add_background - Default value = 0.0
ranked_data - use ranked dataset of value n. Default=1.
annual - POSTdata has annual values (default=False)
"""
import matplotlib
import matplotlib.pyplot as plt
#from scipy.interpolate import griddata
from matplotlib.mlab import griddata
if kwargs.get("exclude_flagpole_receptors", False):
if self.DEBUG: print("DEBUG: removing flagpole receptors")
receptor_array = numpy.column_stack((self.receptors.X[self.receptors.Z==0]
,self.receptors.Y[self.receptors.Z==0]
,self.receptors.Z[self.receptors.Z==0]))
else:
receptor_array = numpy.column_stack((self.receptors.X, self.receptors.Y, self.receptors.Z))
receptor_num = len(receptor_array)
receptors = point(receptor_num
,XYZs=receptor_array)
rank = kwargs.get("ranked_data", 0)
rank_index = 0 if rank == 0 else rank-1
if kwargs.get("annual", False):
if self.DEBUG: print("DEBUG: 'annual' flag is on. Averaging all years.")
if kwargs.get("exclude_flagpole_receptors", False):
if self.DEBUG: print("DEBUG: removing flagplot data")
concs = numpy.mean(self.POSTdata[(r_type, r_form, source_group)][:,rank_index,:], axis=1)[self.receptors.Z==0] * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
else:
concs = numpy.mean(self.POSTdata[(r_type, r_form, source_group)][:,rank_index,:], axis=1) * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
else:
if kwargs.get("exclude_flagpole_receptors", False):
if self.DEBUG: print("DEBUG: removing flagplot data")
concs = self.POSTdata[(r_type, r_form, source_group)][:,rank_index][self.receptors.Z==0] * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
else:
concs = self.POSTdata[(r_type, r_form, source_group)][:,rank_index] * kwargs.get("scalar", 1.0) + kwargs.get("add_background", 0.0)
# define grid.
x_range = receptors.X.max() - receptors.X.min()
y_range = receptors.Y.max() - receptors.Y.min()
xi = numpy.linspace(receptors.X.min(), receptors.X.max(), round(receptors.num**0.85))
yi = numpy.linspace(receptors.Y.min(), receptors.Y.max(), round(receptors.num**0.85))
distance_from_origin = kwargs.get("distance_from_origin", max(x_range/2, y_range/2))
if self.DEBUG: print("DEBUG: distance_from_origin -", distance_from_origin)
origin = point(1)
origin.X = (receptors.X.max() + receptors.X.min())/2
origin.Y = (receptors.Y.max() + receptors.Y.min())/2
# instantiate figure
figure = plt.figure(num=None
,figsize=(6.5, 6) if kwargs.get("nocolorbar", False) else (8, 6)
,dpi=80
,facecolor="white"
,edgecolor="black"
)
ax = figure.add_subplot(111
,aspect="equal"
)
# grid the data.
if self.DEBUG: print("DEBUG: receptors.X:", type(receptors.X), receptors.X)
if self.DEBUG: print("DEBUG: receptors.X:", type(receptors.Y), receptors.Y)
zi = griddata(receptors.X - origin.X,
receptors.Y - origin.Y,
concs,
                      xi - origin.X, yi - origin.Y,
interp = kwargs.get("interpolation_method", "linear"))
if self.DEBUG: print("DEBUG:", zi)
# define contour levels and colors
if kwargs.get("colorslevels", None):
levels = [level for level, color, label in kwargs["colorslevels"]]
kwargs["levels"] = levels
kwargs["contour_colors"] = [color for level, color, label in kwargs["colorslevels"]]
# draw the contours using contour(X,Y,Z,V) formulation (see documentation)
CS = plt.contour(xi - origin.X, # X
yi - origin.Y, # Y
zi, # Z
levels, # V
linewidths=float(kwargs.get("contours", 0)),
colors="black")
# fill the contours
if kwargs.get("contour_colors", None):
cmap, norm = matplotlib.colors.from_levels_and_colors(levels=levels
,colors=kwargs.get("contour_colors", ["white" for level in levels])[:-1]
,extend="neither"
)
else:
cmap = plt.cm.hot_r
norm = matplotlib.colors.Normalize(vmin=0, vmax=1)
CS = plt.contourf(xi - origin.X
,yi - origin.Y
,zi
,levels
,cmap=cmap
,norm=norm
)
# prepare the colorbar
if not kwargs.get("nocolorbar", False):
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
if kwargs.get("colorslevels", False):
if self.DEBUG: print("DEBUG: setting colorbar labels")
labels = [label for level, color, label in kwargs.get("colorslevels", False)]
else:
labels = ["" for level in levels]
colorbar = figure.colorbar(CS
,ax=ax
,format=kwargs.get("scale_decimals", "%0.1f")
,spacing=kwargs.get("colorbar_spacing", "proportional")
,shrink=1.0 # same size as map
)
display_units = pollutant_dict[kwargs["pollutant"]][1]
colorbar.set_label("Concentration (%s)" %display_units
,size=kwargs.get("max_textsize", 10)
)
colorbar.set_ticks(levels)
colorbar.set_ticklabels(labels)
colorbar.ax.tick_params(labelsize=kwargs.get("labelsize", 10)
,colors="black"
,axis="both"
,direction="out"
)
if kwargs.get("tickinterval", None):
tickinterval = kwargs.get("tickinterval", 100)
# build ticks
aticks = ["0m"]
ticks = [0]
j = tickinterval
while j <= distance_from_origin:
aticks = [-j] + aticks + [j]
ticks = [-j] + ticks + [j]
j += tickinterval
ticks = numpy.array(ticks)
CS.ax.set_xticks(ticks)
CS.ax.set_yticks(ticks)
CS.ax.set_xticklabels(aticks, rotation=90)
CS.ax.set_yticklabels(aticks)
else:
if self.DEBUG: print("DEBUG: ticklabels set")
ax.ticklabel_format(axis="both"
,style="plain"
,useOffset=0
)
# set tick interval
ax.set_xlim(-distance_from_origin, distance_from_origin)
ax.set_ylim(-distance_from_origin, distance_from_origin)
# format tick marks
ax.tick_params(axis="both"
,direction="out"
,length=0 if kwargs.get("noticks", False) else 4 # default tick length is 4. Can be omitted if requested using noticks option
,color="grey"
,width=1
,pad=kwargs.get("labelgap",4)
,labelsize=kwargs.get("labelsize", 10)
)
# plot data points.
if kwargs.get("receptor_size", 12):
scat = ax.scatter(receptors.X - origin.X
,receptors.Y - origin.Y
,marker=kwargs.get("receptor_type", "o")
,c=(1,1,1,0) # in place of marker_style which I can't get to work
,s=kwargs.get("receptor_size", 12)
,zorder=10
)
if kwargs.get("max_plot", True):
max_point = point(1
,Xs=numpy.array([receptors.X[concs.argmax()] - origin.X])
,Ys=numpy.array([receptors.Y[concs.argmax()] - origin.Y])
)
if self.DEBUG:
print("DEBUG: max plot:")
print(" X =", max_point.X[0])
print(" Y =", max_point.Y[0])
print(" c =", concs.max())
ax.annotate('+ Maximum Concentration: '+ kwargs.get("scale_decimals","%0.0f") % concs.max()
,(0.5, 0)
,(0, -40 + (kwargs.get("max_textsize", 10)))
,xycoords='axes fraction'
,ha="center"
,va="top"
,textcoords='offset points'
,size=kwargs.get("max_textsize", 10)
)
ax.scatter(max_point.X
,max_point.Y
,marker="+"
,c=(0,0,0) # in place of marker_style which I can't get to work
,s=kwargs.get("max_plot", 50)
,zorder=10
)
if kwargs.get("add_background", False):
ax.annotate('Includes background\nconcentration: '+ kwargs.get("scale_decimals","%0.0f") % kwargs.get("add_background", 0.0)
,(1.05, 0)
,(0, -32 + (kwargs.get("max_textsize", 10)))
,xycoords='axes fraction'
,ha="left"
,va="top"
,textcoords='offset points'
,size=kwargs.get("max_textsize", 10)
)
if kwargs.get("transparent_buildings", False):
if self.DEBUG: print("DEBUG: transparent buildings")
building_color = "#FFFFFF00"
else:
building_color = "white"
if kwargs.get("buildings", False):
for name, story in sorted(self.building_vertices.keys()):
self.draw_building(name, story, ax, origin=origin
,color=kwargs.get("building_color", building_color)
,linewidth=kwargs.get("building_linewidth", 0.4)
,building_name=kwargs.get("building_name", False)
)
if kwargs.get("sources", False):
for name, source in self.sources.items():
if self.DEBUG:
print("DEBUG: source:", name, source.X, source.Y)
ax.scatter(source.X - origin.X
,source.Y - origin.Y
,marker="o"
,c=(0,0,0)
,s=kwargs.get("sources", 10)
,zorder=10
)
if self.DEBUG: print("DEBUG: sources successfully plotted")
ax.set_title(pollutant_dict[kwargs.get("pollutant", "PM2.5")][0] + " " + \
("" if r_form is "CONCURRENT" else r_type )+ "\n" + \
("%s HIGHEST " %(ordinal(rank)) if rank else "") + \
("HOURLY" if (r_form == "CONCURRENT") else r_form)
,size=kwargs.get("title_size", 10)
,loc="left"
,ha="left"
,position=(0.05,1.012)
)
ax.set_title("SOURCE(S): \n"+source_group
,size=kwargs.get("title_size", 10)
,loc="right"
,ha="left"
,position=(0.75,1.012)
)
plt.savefig(kwargs.get("filename", "aermod.png"))
plt.close("all")
| gpl-2.0 |
Vidhyalakshimi/blog | DMPs/cs.py | 4 | 4434 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class CanonicalSystem():
"""Implementation of the canonical dynamical system
as described in Dr. Stefan Schaal's (2002) paper"""
def __init__(self, dt, pattern='discrete'):
"""Default values from Schaal (2012)
dt float: the timestep
pattern string: either 'discrete' or 'rhythmic'
"""
self.ax = 1.0
self.pattern = pattern
if pattern == 'discrete':
self.step = self.step_discrete
self.run_time = 1.0
elif pattern == 'rhythmic':
self.step = self.step_rhythmic
self.run_time = 2*np.pi
else:
raise Exception('Invalid pattern type specified: \
Please specify rhythmic or discrete.')
self.dt = dt
self.timesteps = int(self.run_time / self.dt)
self.reset_state()
def rollout(self, **kwargs):
"""Generate x for open loop movements.
"""
        if 'tau' in kwargs:
timesteps = int(self.timesteps / kwargs['tau'])
else:
timesteps = self.timesteps
self.x_track = np.zeros(timesteps)
self.reset_state()
for t in range(timesteps):
self.x_track[t] = self.x
self.step(**kwargs)
return self.x_track
def reset_state(self):
"""Reset the system state"""
self.x = 1.0
def step_discrete(self, tau=1.0, error_coupling=1.0):
"""Generate a single step of x for discrete
(potentially closed) loop movements.
Decaying from 1 to 0 according to dx = -ax*x.
tau float: gain on execution time
increase tau to make the system execute faster
error_coupling float: slow down if the error is > 1
"""
self.x += (-self.ax * self.x * error_coupling) * tau * self.dt
return self.x
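    # With a constant error_coupling of 1, the update above is a first-order
    # Euler discretisation of dx/dt = -ax * x * tau, whose closed-form solution
    # is x(t) = exp(-ax * tau * t) with x(0) = 1.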
def step_rhythmic(self, tau=1.0, error_coupling=1.0):
"""Generate a single step of x for rhythmic
closed loop movements. Decaying from 1 to 0
according to dx = -ax*x.
tau float: gain on execution time
increase tau to make the system execute faster
error_coupling float: slow down if the error is > 1
"""
self.x += (1 * error_coupling * tau) * self.dt
return self.x
#==============================
# Test code
#==============================
if __name__ == "__main__":
cs = CanonicalSystem(dt=.001, pattern='discrete')
# test normal rollout
x_track1 = cs.rollout()
cs.reset_state()
# test error coupling
timesteps = int(1.0/.001)
x_track2 = np.zeros(timesteps)
err = np.zeros(timesteps)
err[200:400] = 2
err_coup = 1.0 / (1 + err)
for i in range(timesteps):
x_track2[i] = cs.step(error_coupling=err_coup[i])
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots(figsize=(6,3))
ax1.plot(x_track1, lw=2)
ax1.plot(x_track2, lw=2)
plt.grid()
plt.legend(['normal rollout', 'error coupling'])
ax2 = ax1.twinx()
ax2.plot(err, 'r-', lw=2)
plt.legend(['error'], loc='lower right')
plt.ylim(0, 3.5)
plt.xlabel('time (s)')
plt.ylabel('x')
plt.title('Canonical system - discrete')
for t1 in ax2.get_yticklabels():
t1.set_color('r')
plt.tight_layout()
cs = CanonicalSystem(dt=.001, pattern='rhythmic')
# test normal rollout
x_track1 = cs.rollout()
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots(figsize=(6,3))
ax1.plot(x_track1, lw=2)
plt.grid()
plt.legend(['normal rollout'], loc='lower right')
plt.xlabel('time (s)')
plt.ylabel('x')
plt.title('Canonical system - rhythmic')
plt.show()
| gpl-3.0 |
lsolanka/gridcells | setup.py | 2 | 4136 | '''Setup script for GridCells.'''
from __future__ import absolute_import, print_function, division
from os.path import join
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
all_packages = [
'gridcells',
'gridcells.analysis',
'gridcells.core',
'gridcells.plotting',
]
default_swig_opts = [
'-builtin',
'-Wall',
'-O',
'-ignoremissing',
'-c++',
'-Iexternal/armanpy/include',
'-Isrc/include'
]
ARMANPY_INCLUDE_DIR = 'external/armanpy/include'
ARMANPY_DEPS = [
join(ARMANPY_INCLUDE_DIR, 'armanpy.hpp'),
join(ARMANPY_INCLUDE_DIR, 'armanpy.i'),
join(ARMANPY_INCLUDE_DIR, 'armanpy_1d.i'),
join(ARMANPY_INCLUDE_DIR, 'armanpy_2d.i'),
join(ARMANPY_INCLUDE_DIR, 'armanpy_3d.i'),
join(ARMANPY_INCLUDE_DIR, 'numpy.i'),
]
class DelayedExtension(Extension, object):
"""
A distutils Extension subclass where some of its members
may have delayed computation until reaching the build phase.
This is so we can, for example, get the Numpy include dirs
after pip has installed Numpy for us if it wasn't already
on the system.
This class has been adapted from the matplotlib package.
"""
def __init__(self, *args, **kwargs):
super(DelayedExtension, self).__init__(*args, **kwargs)
self._hooks = {"include_dirs": self.get_include_dirs}
@staticmethod
def get_include_dirs():
import numpy
return [
'src/include',
'external/armanpy/include',
'external/armadillo/include',
numpy.get_include()
]
class DelayedMember(property):
def __init__(self, name):
self._name = name
def __get__(self, obj, objtype=None):
result = getattr(obj, '_' + self._name, [])
if self._name in obj._hooks:
result = obj._hooks[self._name]() + result
return result
def __set__(self, obj, value):
setattr(obj, '_' + self._name, value)
include_dirs = DelayedMember('include_dirs')
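# Because include_dirs is a DelayedMember descriptor, numpy.get_include() is
# only evaluated when setuptools/distutils actually reads the extension's
# include dirs at build time -- i.e. after pip has had the chance to install
# numpy.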
field_ext = DelayedExtension('gridcells.analysis._fields',
['src/fields.cpp', 'src/fields.i'],
depends=['src/fields.cpp',
'src/include/fields.hpp',
'src/fields.i'] + ARMANPY_DEPS,
swig_opts=default_swig_opts)
common_ext = DelayedExtension('gridcells.core._common',
['src/common.cpp', 'src/common.i'],
depends=['src/common.cpp',
'src/include/common.hpp',
'src/common.i'] + ARMANPY_DEPS,
swig_opts=default_swig_opts)
spikes_ext = DelayedExtension('gridcells.analysis._spikes',
['src/spikes.cpp', 'src/spikes.i'],
depends=['src/spikes.cpp',
'src/include/spikes.hpp',
'src/spikes.i'] + ARMANPY_DEPS,
swig_opts=default_swig_opts)
signal_ext = DelayedExtension('gridcells.analysis._signal',
['src/signal.cpp', 'src/signal.i'],
depends=['src/signal.cpp',
'src/include/signal.hpp',
'src/signal.i'] + ARMANPY_DEPS,
swig_opts=default_swig_opts)
all_extensions = [
field_ext,
common_ext,
spikes_ext,
signal_ext
]
setup(
name='gridcells',
version='0.1.3',
description='Package for grid cell analysis and simulation.',
author='Lukas Solanka',
author_email='[email protected]',
url='https://github.com/lsolanka/gridcells',
license='GPL',
packages=all_packages,
ext_modules=all_extensions,
install_requires=['numpy>=1.8.0',
'scipy>=0.13.3',
'enum34>=1.0.4']
)
| gpl-3.0 |
leonardolepus/pubmad | experiments/features_20140601/distribution.py | 1 | 1833 | import pickle
import os, sys
import itertools
import matplotlib.pyplot as plt
from scipy import stats
sys.path.insert(1, os.path.abspath('../../'))
from toolbox.graph_io.kegg.parse_KGML import KGML2Graph
features = {}
for feature_file in os.listdir('../../data/evex/Homo_Sapiens/features/'):
with open('../../data/evex/Homo_Sapiens/features/'+feature_file, 'r') as f:
try:
features[feature_file] = pickle.load(f)
except:
print feature_file, sys.exc_info()
edge_betweenness_centrality = features['edge_betweenness_centrality']
del features['edge_betweenness_centrality']
def distribution(x, label):
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.set_title(label+'_hist')
ax1.hist(x, bins = 100, histtype = 'step')
ax2 = fig.add_subplot(2, 1, 2)
ax2.set_title(label+'_cumulative_normalized')
ax2.hist(x, bins = 100, cumulative = True, normed = True, histtype = 'step')
plt.savefig(label)
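# distribution() writes a two-panel figure per feature: a raw histogram on top
# and its normalized cumulative histogram below, saved as "<label>.png" (png
# being matplotlib's default savefig format when no extension is given).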
for i in features:
x = features[i].values()
distribution(x, i)
pathway_f = '../../data/kegg/hsa04151.xml'
kegg = KGML2Graph(pathway_f)
kegg = kegg.to_undirected()
for i in features:
ev = features[i]
ke = [ev[j] for j in kegg.nodes_iter() if j in ev]
ev = ev.values()
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.set_title(i+'_hist')
ax1.hist(ev, bins = 100, histtype = 'step', label = 'evex', normed = True)
ax1.hist(ke, bins = 100, histtype = 'step', label = 'kegg', normed = True)
ax2 = fig.add_subplot(2, 1, 2)
ax2.set_title(i+'_cumulative_normalized')
ax2.hist(ev, bins = 100, cumulative = True, normed = True, histtype = 'step', label = 'evex')
ax2.hist(ke, bins = 100, cumulative = True, normed = True, histtype = 'step', label = 'kegg')
plt.savefig(i+'_evex_VS_kegg')
| gpl-2.0 |
bukzor/sympy | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
                if isinstance(min_module_version, str):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # We don't know what this is; hopefully it's something
                    # that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
neuro-lyon/multiglom-model | src/plot_multiple_osc_vs_rates.py | 1 | 2518 | # -*- coding:utf-8 -*-
"""
Script to plot osc vs rate curve coming from different set of simulations
Usage
-----
Change the files to analyse in the script in the variable ``list_files``.
"""
import matplotlib.pyplot as py
from matplotlib.cm import get_cmap
from scipy import *
from h5manager import *
if __name__ == '__main__':
list_files=[['db_one_glom_N100_sig035_gI20.h5','db_one_glom_N100_sig035_gI20_gE1_4.h5','db_one_glom_N100_sig035_gI20_gE3_5.h5']]
def get_data(db_filename):
DB = tables.openFile(db_filename)
# Collect, g_Ein0, firing rate, network osc freq
ATTRS = (('paramset', '_v_attrs', 'Input', 'g_Ein0'),
('results', 'spikes_it'),
('results', '_v_attrs', 'FFTMAX'))
res=get_all_attrs(DB, ATTRS)
simu_length=DB.listNodes("/")[0].paramset._v_attrs['Common']['simu_length']
N_mitral=DB.listNodes("/")[0].paramset._v_attrs['Common']['N_mitral']
start_time=simu_length/2.
all_gEin0=[r1 for r1,r2,r3 in res]
all_rates=[1.*(r2[1,:]>=start_time).sum()/N_mitral/(simu_length-start_time) for r1,r2,r3 in res]
all_freqs=[r3['mean'] for r1,r2,r3 in res]
DB.close()
return all_gEin0,all_rates,all_freqs
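    # get_data returns three parallel lists, one entry per simulation stored in
    # the HDF5 file: the excitatory input conductance g_Ein0, the mean mitral
    # firing rate over the second half of the run, and the mean network
    # oscillation frequency (FFTMAX).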
n_rows=len(list_files)
fig=py.figure()
for ind,sublist_files in enumerate(list_files):
ax=fig.add_subplot(n_rows,2,2*ind+1)
cmap = get_cmap('jet', len(sublist_files))
for num,db_filename in enumerate(sublist_files):
all_gEin0,all_rates,all_freqs=get_data(db_filename)
ax.scatter(all_rates,all_freqs,color=cmap(num),label=db_filename)
ax.set_xlabel("Avg firing rate")
ax.set_ylabel("Network freq")
xl=ax.get_xlim()
yl=ax.get_ylim()
coord=min(xl[1],yl[1])
coord2=min(xl[1],2*yl[1])
ax.plot([0,coord],[0,coord],'--')
ax.plot([0,coord2],[0,2*coord],'--')
ax.set_xlim(0,40)
ax.set_ylim(0,100)
ax.legend()
ax=fig.add_subplot(n_rows,2,2*ind+2)
for num,db_filename in enumerate(sublist_files):
all_gEin0,all_rates,all_freqs=get_data(db_filename)
ax.scatter(all_gEin0,all_rates,color=cmap(num),label=db_filename)
ax.set_xlabel("g_Ein0")
ax.set_ylabel("Network freq")
ax.set_xlim(0,5.2)
ax.set_ylim(0,100)
py.show() | mit |
openeemeter/eemeter | tests/test_visualization.py | 1 | 4663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import matplotlib
import pandas as pd
import pytest
matplotlib.use("Agg")
from eemeter.caltrack.usage_per_day import (
CalTRACKUsagePerDayCandidateModel,
CalTRACKUsagePerDayModelResults,
)
from eemeter.visualization import plot_energy_signature, plot_time_series
def test_plot_time_series(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
ax_m, ax_t = plot_time_series(meter_data, temperature_data)
m_data = ax_m.lines[0].get_xydata()
t_data = ax_t.lines[0].get_xydata()
assert m_data.shape == (810, 2)
assert t_data.shape == (19417, 2)
def test_plot_energy_signature(il_electricity_cdd_hdd_daily):
meter_data = il_electricity_cdd_hdd_daily["meter_data"]
temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
ax = plot_energy_signature(meter_data, temperature_data, title="title")
data = ax.collections[0].get_offsets()
assert data.shape == (810, 2)
assert ax.get_title() == "title"
def test_plot_caltrack_candidate_qualified():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
ax = candidate_model.plot(candidate_model, title="title")
data = ax.lines[0].get_xydata()
assert data.shape == (60, 2)
assert ax.get_title() == "title"
def test_plot_caltrack_candidate_disqualified():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="DISQUALIFIED",
model_params={"intercept": 1},
)
ax = candidate_model.plot()
data = ax.lines[0].get_xydata()
assert data.shape == (60, 2)
def test_plot_caltrack_candidate_with_range():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
ax = candidate_model.plot(temp_range=(10, 20))
data = ax.lines[0].get_xydata()
assert data.shape == (10, 2)
def test_plot_caltrack_candidate_best():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
ax = candidate_model.plot(best=True)
data = ax.lines[0].get_xydata()
assert data.shape == (60, 2)
def test_plot_caltrack_candidate_error():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="ERROR",
model_params={"intercept": 1},
)
ax = candidate_model.plot()
assert ax is None
def test_plot_caltrack_candidate_cdd_hdd_model():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="cdd_hdd",
formula="formula",
status="QUALIFIED",
model_params={
"beta_hdd": 1,
"beta_cdd": 1,
"cooling_balance_point": 65,
"heating_balance_point": 65,
"intercept": 1,
},
)
ax = candidate_model.plot()
data = ax.lines[0].get_xydata()
assert data.shape == (60, 2)
def test_plot_model_results(il_electricity_cdd_hdd_daily):
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
candidates=[candidate_model],
)
ax = model_results.plot(title="title", with_candidates=True)
data = ax.lines[0].get_xydata()
assert data.shape == (70, 2)
data = ax.lines[1].get_xydata()
assert data.shape == (70, 2)
assert ax.get_title() == "title"
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/groupby/test_nth.py | 2 | 16800 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna
from pandas.util.testing import assert_frame_equal, assert_series_equal
def test_first_last_nth(df):
# tests for first / last / nth
grouped = df.groupby("A")
first = grouped.first()
expected = df.loc[[1, 0], ["B", "C", "D"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = df.loc[[5, 7], ["B", "C", "D"]]
expected.index = Index(["bar", "foo"], name="A")
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = df.loc[[2, 3], ["B", "C", "D"]].copy()
expected.index = Index(["foo", "bar"], name="A")
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped["B"].first()
grouped["B"].last()
grouped["B"].nth(0)
df.loc[df["A"] == "foo", "B"] = np.nan
assert isna(grouped["B"].first()["foo"])
assert isna(grouped["B"].last()["foo"])
assert isna(grouped["B"].nth(0)["foo"])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
result = g.first()
expected = df.iloc[[1, 2]].set_index("A")
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index("A")
result = g.nth(0, dropna="any")
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(df_mixed_floats):
df = df_mixed_floats.copy()
df["E"] = True
df["F"] = 1
# tests for first / last / nth
grouped = df.groupby("A")
first = grouped.first()
expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ["B", "C", "D", "E", "F"]]
expected.index = Index(["bar", "foo"], name="A")
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = list(range(10))
idx.append(9)
s = Series(data=range(11), index=idx, name="IntCol")
assert s.dtype == "int64"
f = s.groupby(level=0).first()
assert f.dtype == "int64"
def test_nth():
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index("A"))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index("A"))
assert_frame_equal(g.nth(2), df.loc[[]].set_index("A"))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index("A"))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index("A"))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index("A"))
assert_series_equal(g.B.nth(0), df.set_index("A").B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index("A").B.iloc[[1]])
assert_frame_equal(g[["B"]].nth(0), df.loc[[0, 2], ["A", "B"]].set_index("A"))
exp = df.set_index("A")
assert_frame_equal(g.nth(0, dropna="any"), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna="any"), exp.iloc[[1, 2]])
exp["B"] = np.nan
assert_frame_equal(g.nth(7, dropna="any"), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna="any"), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame(
{
"color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"},
"food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"},
"two": {
0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997,
},
"one": {
0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997,
},
}
).set_index(["color", "food"])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype="int64")
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
assert expected.name == 1
assert expected2.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna="all")
assert_series_equal(result, expected)
with pytest.raises(ValueError, match="For a DataFrame groupby"):
s.groupby(g, sort=False).nth(0, dropna=True)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
result = g.B.nth(0, dropna="all")
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"])
g = df.groupby("A")
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index("A"))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index("A"))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index("A"))
assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index("A"))
assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index("A"))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index("A"))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index("A"))
business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B")
df = DataFrame(1, index=business_dates, columns=["a", "b"])
# get the first, fourth and last two business days for each month
key = [df.index.year, df.index.month]
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
[
"2014/4/1",
"2014/4/4",
"2014/4/29",
"2014/4/30",
"2014/5/1",
"2014/5/6",
"2014/5/29",
"2014/5/30",
"2014/6/2",
"2014/6/5",
"2014/6/27",
"2014/6/30",
]
)
expected = DataFrame(1, columns=["a", "b"], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(three_group):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = three_group.groupby(["A", "B"])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected_first, expected_last",
[
(
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
{
"id": ["A"],
"time": Timestamp("2012-02-01 14:00:00", tz="US/Central"),
"foo": [1],
},
),
(
{
"id": ["A", "B", "A"],
"time": [
Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
],
"foo": [1, 2, 3],
},
{
"id": ["A", "B"],
"time": [
Timestamp("2012-01-01 13:00:00", tz="America/New_York"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
],
"foo": [1, 2],
},
{
"id": ["A", "B"],
"time": [
Timestamp("2012-03-01 12:00:00", tz="Europe/London"),
Timestamp("2012-02-01 14:00:00", tz="US/Central"),
],
"foo": [3, 2],
},
),
],
)
def test_first_last_tz(data, expected_first, expected_last):
# GH15884
# Test that the timezone is retained when calling first
# or last on groupby with as_index=False
df = DataFrame(data)
result = df.groupby("id", as_index=False).first()
expected = DataFrame(expected_first)
cols = ["id", "time", "foo"]
assert_frame_equal(result[cols], expected[cols])
result = df.groupby("id", as_index=False)["time"].first()
assert_frame_equal(result, expected[["id", "time"]])
result = df.groupby("id", as_index=False).last()
expected = DataFrame(expected_last)
cols = ["id", "time", "foo"]
assert_frame_equal(result[cols], expected[cols])
result = df.groupby("id", as_index=False)["time"].last()
assert_frame_equal(result, expected[["id", "time"]])
@pytest.mark.parametrize(
"method, ts, alpha",
[
["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"],
["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"],
],
)
def test_first_last_tz_multi_column(method, ts, alpha):
# GH 21603
category_string = pd.Series(list("abc")).astype("category")
df = pd.DataFrame(
{
"group": [1, 1, 2],
"category_string": category_string,
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
}
)
result = getattr(df.groupby("group"), method)()
expected = pd.DataFrame(
{
"category_string": pd.Categorical(
[alpha, "c"], dtype=category_string.dtype
),
"datetimetz": [ts, Timestamp("2013-01-03", tz="US/Eastern")],
},
index=pd.Index([1, 2], name="group"),
)
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected():
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
}
)
grouped = three_group.groupby(["A", "B"])
result = grouped.nth(0)
expected = DataFrame(
{"C": ["dull", "dull", "dull", "dull"]},
index=MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo"], ["one", "two", "one", "two"]],
names=["A", "B"],
),
)
assert_frame_equal(result, expected)
def test_groupby_head_tail():
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
g_as = df.groupby("A", as_index=True)
g_not_as = df.groupby("A", as_index=False)
    # as_index=False, much easier
assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
empty_not_as = DataFrame(
columns=df.columns, index=pd.Index([], dtype=df.index.dtype)
)
empty_not_as["A"] = empty_not_as["A"].astype(df.A.dtype)
empty_not_as["B"] = empty_not_as["B"].astype(df.B.dtype)
assert_frame_equal(empty_not_as, g_not_as.head(0))
assert_frame_equal(empty_not_as, g_not_as.tail(0))
assert_frame_equal(empty_not_as, g_not_as.head(-1))
assert_frame_equal(empty_not_as, g_not_as.tail(-1))
assert_frame_equal(df, g_not_as.head(7)) # contains all
assert_frame_equal(df, g_not_as.tail(7))
# as_index=True, (used to be different)
df_as = df
assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
empty_as["A"] = empty_not_as["A"].astype(df.A.dtype)
empty_as["B"] = empty_not_as["B"].astype(df.B.dtype)
assert_frame_equal(empty_as, g_as.head(0))
assert_frame_equal(empty_as, g_as.tail(0))
assert_frame_equal(empty_as, g_as.head(-1))
assert_frame_equal(empty_as, g_as.tail(-1))
assert_frame_equal(df_as, g_as.head(7)) # contains all
assert_frame_equal(df_as, g_as.tail(7))
# test with selection
assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
assert_frame_equal(g_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
assert_frame_equal(g_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_not_as[["A"]].head(1), df_as.loc[[0, 2], ["A"]])
assert_frame_equal(g_not_as[["B"]].head(1), df_as.loc[[0, 2], ["B"]])
assert_frame_equal(g_not_as[["A", "B"]].head(1), df_as.loc[[0, 2]])
def test_group_selection_cache():
# GH 12839 nth, head, and tail should return same result consistently
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
expected = df.iloc[[0, 2]].set_index("A")
g = df.groupby("A")
result1 = g.head(n=2)
result2 = g.nth(0)
assert_frame_equal(result1, df)
assert_frame_equal(result2, expected)
g = df.groupby("A")
result1 = g.tail(n=2)
result2 = g.nth(0)
assert_frame_equal(result1, df)
assert_frame_equal(result2, expected)
g = df.groupby("A")
result1 = g.nth(0)
result2 = g.head(n=2)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, df)
g = df.groupby("A")
result1 = g.nth(0)
result2 = g.tail(n=2)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, df)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=["a", "b", "c"])
result = df.groupby("a").nth(10)
expected = DataFrame(index=Index([], name="a"), columns=["b", "c"])
assert_frame_equal(result, expected)
result = df.groupby(["a", "b"]).nth(10)
expected = DataFrame(
index=MultiIndex([[], []], [[], []], names=["a", "b"]), columns=["c"]
)
assert_frame_equal(result, expected)
def test_nth_column_order():
# GH 20760
# Check that nth preserves column order
df = DataFrame(
[[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]],
columns=["A", "C", "B"],
)
result = df.groupby("A").nth(0)
expected = DataFrame(
[["b", 100.0], ["c", 200.0]], columns=["C", "B"], index=Index([1, 2], name="A")
)
assert_frame_equal(result, expected)
result = df.groupby("A").nth(-1, dropna="any")
expected = DataFrame(
[["a", 50.0], ["d", 150.0]], columns=["C", "B"], index=Index([1, 2], name="A")
)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dropna", [None, "any", "all"])
def test_nth_nan_in_grouper(dropna):
# GH 26011
df = DataFrame(
[[np.nan, 0, 1], ["abc", 2, 3], [np.nan, 4, 5], ["def", 6, 7], [np.nan, 8, 9]],
columns=list("abc"),
)
result = df.groupby("a").nth(0, dropna=dropna)
expected = pd.DataFrame(
[[2, 3], [6, 7]], columns=list("bc"), index=Index(["abc", "def"], name="a")
)
assert_frame_equal(result, expected)
| apache-2.0 |
madgik/exareme | Exareme-Docker/src/mip-algorithms/LOGISTIC_REGRESSION/generate_testcases_logistic_regression.py | 1 | 2972 | """
WARNING: This one needs Python 3.6, because the scikit-learn version available
for Python 2.7 doesn't support Logistic Regression without regularization.
"""
import json
from pathlib import Path
from random import shuffle
from sklearn.linear_model import LogisticRegression
from mipframework.algorithmtest import AlgorithmTest
class LogisticRegressionTest(AlgorithmTest):
def get_expected(self, alg_input):
# Get data and remove missing values
x_names = alg_input[0]["value"]
y_name = alg_input[1]["value"]
variables = x_names + "," + y_name
datasets = alg_input[3]["value"]
data = self.get_data(variables, datasets=datasets)
data = data.dropna()
# Select two categories at random for y
categories = list(set(data[y_name]))
if len(categories) < 2:
return None
shuffle(categories)
cat_0, cat_1 = categories[:2]
# Build filter
# filter_ = {
# "condition": "OR",
# "rules": [
# {
# "id": y_name,
# "field": y_name,
# "type": "string",
# "input": "text",
# "operator": "equal",
# "value": cat_0,
# },
# {
# "id": y_name,
# "field": y_name,
# "type": "string",
# "input": "text",
# "operator": "equal",
# "value": cat_1,
# },
# ],
# "valid": True,
# }
# alg_input[4]["value"] = json.dumps(filter_)
alg_input[4]["value"] = ""
alg_input[5]["value"] = cat_0
alg_input[6]["value"] = cat_1
# Filter data according to above filter
data = data[(data[y_name] == cat_0) | (data[y_name] == cat_1)]
y = data[y_name]
X = data[x_names.split(",")]
# If n_obs < n_cols reject
n_obs = len(data)
if n_obs == 0 or data.shape[0] < data.shape[1]:
return None
        # Reject when one class appears fewer times than the number of columns
if any([len(y[y == item]) <= X.shape[1] for item in set(y)]):
return None
# Fit logistic regression and return results
logreg_res = LogisticRegression(penalty="none", solver="newton-cg").fit(X, y)
coeff = logreg_res.intercept_.tolist() + logreg_res.coef_.tolist()[0]
coeff_names = ["Intercept"] + x_names.split(",")
return {"n_obs": n_obs, "coeff": coeff, "coeff_names": coeff_names}
if __name__ == "__main__":
prop_path = dbs_folder = Path(__file__).parent / "properties.json"
logistic_regression_test = LogisticRegressionTest(prop_path.as_posix())
logistic_regression_test.generate_test_cases(num_tests=100)
logistic_regression_test.to_json("logistic_regression_expected.json")
| mit |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/compat/__init__.py | 1 | 11402 | """
compat
======
Cross-compatible functions for Python 2 and 3.
Key items to import for 2/3 compatible code:
* iterators: range(), map(), zip(), filter(), reduce()
* lists: lrange(), lmap(), lzip(), lfilter()
* unicode: u() [u"" is a syntax error in Python 3.0-3.2]
* longs: long (int in Python 3)
* callable
* iterable method compatibility: iteritems, iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
* add_metaclass(metaclass) - class decorator that recreates class with the
given metaclass instead (and avoids intermediary class creation)
Other items:
* OrderedDefaultDict
* platform checker
"""
# pylint disable=W0611
# flake8: noqa
import functools
import itertools
from distutils.version import LooseVersion
from itertools import product
import sys
import types
from unicodedata import east_asian_width
import struct
import inspect
from collections import namedtuple
PY2 = sys.version_info[0] == 2
PY3 = (sys.version_info[0] >= 3)
PY35 = (sys.version_info >= (3, 5))
try:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
BytesIO = StringIO
import cPickle
import httplib
except ImportError:
import builtins
from io import StringIO, BytesIO
cStringIO = StringIO
import pickle as cPickle
import http.client as httplib
from pandas.compat.chainmap import DeepChainMap
if PY3:
def isidentifier(s):
return s.isidentifier()
def str_to_bytes(s, encoding=None):
return s.encode(encoding or 'ascii')
def bytes_to_str(b, encoding=None):
return b.decode(encoding or 'utf-8')
# The signature version below is directly copied from Django,
# https://github.com/django/django/pull/4846
def signature(f):
sig = inspect.signature(f)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
keywords = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
keywords = keywords[0] if keywords else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and p.default is not p.empty
] or None
argspec = namedtuple('Signature',['args','defaults',
'varargs','keywords'])
return argspec(args,defaults,varargs,keywords)
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
reduce = functools.reduce
long = int
unichr = chr
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
def str_to_bytes(s, encoding='ascii'):
return s
def bytes_to_str(b, encoding='ascii'):
return b
def signature(f):
return inspect.getargspec(f)
# import iterator versions of these functions
range = xrange
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
if PY2:
def iteritems(obj, **kw):
return obj.iteritems(**kw)
def iterkeys(obj, **kw):
return obj.iterkeys(**kw)
def itervalues(obj, **kw):
return obj.itervalues(**kw)
next = lambda it : it.next()
else:
def iteritems(obj, **kw):
return iter(obj.items(**kw))
def iterkeys(obj, **kw):
return iter(obj.keys(**kw))
def itervalues(obj, **kw):
return iter(obj.values(**kw))
next = next
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
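# Illustrative usage sketch (added; not part of the original module). The class
# and function names below are made up:
#
#     class Greeter(object):
#         pass
#
#     def greet(self):
#         return "hello from " + type(self).__name__
#
#     bind_method(Greeter, "greet", greet)
#     Greeter().greet()   # 'hello from Greeter' on both Python 2 and Python 3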
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from Benjamin Peterson's six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
def u(s):
return s
def u_safe(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import lzma from the std library """
import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name/qualname attributes of the function """
f.__name__ = name
f.__qualname__ = '{klass}.{name}'.format(
klass=cls.__name__,
name=name)
f.__module__ = cls.__module__
return f
ResourceWarning = ResourceWarning
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
def u(s):
return unicode(s, "unicode_escape")
def u_safe(s):
try:
return unicode(s, "unicode_escape")
except:
return s
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import the backported lzma library
or raise ImportError if not available """
from backports import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name attributes of the function """
f.__name__ = name
return f
class ResourceWarning(Warning): pass
string_and_binary_types = string_types + (binary_type,)
try:
# callable reintroduced in later versions of Python
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
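# Illustrative usage sketch (added; not part of the original module). The
# metaclass and class names are made up; the decorator lets one class body work
# under both the Python 2 and Python 3 metaclass syntaxes:
#
#     class Meta(type):
#         def __new__(mcls, name, bases, namespace):
#             namespace.setdefault("tag", name.lower())
#             return super(Meta, mcls).__new__(mcls, name, bases, namespace)
#
#     @add_metaclass(Meta)
#     class Tagged(object):
#         pass
#
#     Tagged.tag   # 'tagged'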
from collections import OrderedDict, Counter
if PY3:
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
# this version of raise is a syntax error in Python 3
exec("""
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
""")
raise_with_traceback.__doc__ = """Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
# http://stackoverflow.com/questions/4126348
# Thanks to @martineau at SO
from dateutil import parser as _date_parser
import dateutil
if LooseVersion(dateutil.__version__) < '2.0':
@functools.wraps(_date_parser.parse)
def parse_date(timestr, *args, **kwargs):
timestr = bytes(timestr)
return _date_parser.parse(timestr, *args, **kwargs)
elif PY2 and LooseVersion(dateutil.__version__) == '2.0':
# dateutil brokenness
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
else:
parse_date = _date_parser.parse
class OrderedDefaultdict(OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, list(self.items())
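# Illustrative usage sketch (added; not part of the original module):
# OrderedDefaultdict behaves like collections.defaultdict but preserves
# insertion order, e.g.
#
#     counts = OrderedDefaultdict(int)
#     for key in ["b", "a", "b"]:
#         counts[key] += 1
#     list(counts.items())   # [('b', 2), ('a', 1)]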
# https://github.com/pydata/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
| mit |
okadate/romspy | romspy/make/make_ini_file.py | 1 | 6797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
2014/12/04 OKADA Teruhisa made this file.
2015/05/02 okada remade it.
"""
from datetime import datetime
import netCDF4
import numpy as np
from numpy import dtype
#import matplotlib.pyplot as plt
import pandas as pd
import itertools
def make_ini_file(grdfile, inifile, biofile=None, bgcfile=None):
kmax = 20
Nbed = 20
nc = netCDF4.Dataset(grdfile, 'r')
imax = len(nc.dimensions['xi_rho'])
jmax = len(nc.dimensions['eta_rho'])
lon = nc.variables['lon_rho'][:,:]
lat = nc.variables['lat_rho'][:,:]
h = nc.variables['h'][:,:]
nc.close()
dstart = datetime(2012,1,1,0,0,0)
GMT = 'seconds since 1968-05-23 00:00:00 GMT'
JST = 'seconds since 1968-05-23 09:00:00 GMT'
time_out = [netCDF4.date2num(dstart, JST)]
nc = netCDF4.Dataset(inifile, 'w', format='NETCDF3_CLASSIC')
nc.Author = 'romspy.make_ini_file'
nc.Created = datetime.now().isoformat()
nc.grdfile = grdfile
nc.createDimension('xi_rho', imax)
nc.createDimension('xi_u', imax-1)
nc.createDimension('xi_v', imax)
nc.createDimension('eta_rho', jmax)
nc.createDimension('eta_u', jmax)
nc.createDimension('eta_v', jmax-1)
nc.createDimension('s_rho', kmax)
nc.createDimension('s_w', kmax+1)
nc.createDimension('ocean_time', None)
time = nc.createVariable('ocean_time', dtype('double').char, ('ocean_time',))
lon_rho = nc.createVariable('lon_rho', dtype('float32').char, ('eta_rho', 'xi_rho'))
lat_rho = nc.createVariable('lat_rho', dtype('float32').char, ('eta_rho', 'xi_rho'))
zeta = nc.createVariable('zeta', dtype('float32').char, ('ocean_time', 'eta_rho', 'xi_rho'))
ubar = nc.createVariable('ubar', dtype('float32').char, ('ocean_time', 'eta_u', 'xi_u'))
vbar = nc.createVariable('vbar', dtype('float32').char, ('ocean_time', 'eta_v', 'xi_v'))
u = nc.createVariable('u', dtype('float32').char, ('ocean_time', 's_rho', 'eta_u', 'xi_u'))
v = nc.createVariable('v', dtype('float32').char, ('ocean_time', 's_rho', 'eta_v', 'xi_v'))
temp = nc.createVariable('temp', dtype('float32').char, ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'))
salt = nc.createVariable('salt', dtype('float32').char, ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'))
time.units = GMT
lon_rho.units = 'degree_north'
lat_rho.units = 'degree_east'
zeta.units = 'meter'
ubar.units = 'meter second-1'
vbar.units = 'meter second-1'
u.units = 'meter second-1'
v.units = 'meter second-1'
temp.units = 'Celsius'
salt.units = 'PSU'
time[:] = time_out
lon_rho[:,:] = lon
lat_rho[:,:] = lat
zeta[:,:,:] = 1.5
ubar[:,:,:] = 0.0
vbar[:,:,:] = 0.0
u[:,:,:,:] = 0.0
v[:,:,:,:] = 0.0
temp[:,:,:,:] = 13.0
salt[:,:,:,:] = 32.5
zeta.time = 'ocean_time'
ubar.time = 'ocean_time'
vbar.time = 'ocean_time'
u.time = 'ocean_time'
v.time = 'ocean_time'
temp.time = 'ocean_time'
salt.time = 'ocean_time'
if biofile is not None:
add_bio(nc, biofile)
if bgcfile is not None:
add_bgc(nc, Nbed, h, bgcfile)
nc.close()
def add_bio(nc, biofile):
"""
    2015/05/01 okada All values are 0; use biofile=0.
"""
Chl2C = 0.05 # okada (=1/20 gChl/gC)
PhyCN = 6.625 # (=106/16 molC/molN)
C = 12.01
N = 14.01
P = 30.97
bio_names = ['NO3','NH4','chlorophyll','phytoplankton','zooplankton',
'LdetritusN','SdetritusN',
'oxygen','PO4','LdetritusP','SdetritusP']
bio_out = {}
bio_out["chlorophyll"] = 1.0
bio_out["NO3"] = 0.0233 / N * 1000.0
bio_out["NH4"] = 0.0193 / N * 1000.0
bio_out["SdetritusN"] = 0.0296 / N * 1000.0
bio_out["PO4"] = 0.0135 / P * 1000.0
bio_out["SdetritusP"] = 0.0080 / P * 1000.0
bio_out["oxygen"] = 400.0
bio_out["phytoplankton"] = bio_out["chlorophyll"] / (Chl2C * PhyCN * C)
bio_out["zooplankton"] = bio_out["phytoplankton"] * 0.1
bio_out["SdetritusN"] = bio_out["SdetritusN"] / 2.0
bio_out["SdetritusP"] = bio_out["SdetritusP"] / 2.0
bio_out["LdetritusN"] = bio_out["SdetritusN"]
bio_out["LdetritusP"] = bio_out["SdetritusP"]
nc.createDimension('bio_tracer', len(bio_names))
bio = {}
for name in bio_names:
bio[name] = nc.createVariable(name, dtype('float32').char, ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'))
for name in bio_names:
bio[name].units = 'milimole meter-3'
for name in bio_names:
bio[name][:,:,:,:] = bio_out[name]
for name in bio_names:
bio[name].time = 'ocean_time'
return nc
def add_bgc(nc, Nbed, h, bgcfile):
"""
2015/05/01 okada Read from rst file.
"""
bgc_names = ['O2','NH4','NO3','PO4','SO4','H2S','Mn','Fe','CH4','DOMf','DOMs',
'POMf','POMs','POMn','FeOOHA','FeOOHB','FeOOHP','MnO2A','MnO2B',
'S0','FeS','FeS2']
bgc_out = {}
imax = len(nc.dimensions['xi_rho'])
jmax = len(nc.dimensions['eta_rho'])
for name in bgc_names:
bgc_out[name] = np.ndarray(shape=[1, Nbed, jmax, imax])
rst1 = pd.read_csv(bgcfile.format(1))
rst2 = pd.read_csv(bgcfile.format(2))
    print(rst1.describe())
for name in bgc_names:
        print(name)
for i, j in itertools.product(range(imax), range(jmax)):
if 0.5 < h[j,i] < 18.0:
bgc_out[name][0,:,j,i] = rst1[name][:Nbed]
elif h[j,i] >= 18.0:
bgc_out[name][0,:,j,i] = rst2[name][:Nbed]
else:
bgc_out[name][0,:,j,i] = 0.0
nc.createDimension('Nbed', Nbed)
nc.createDimension('bgc_tracer', len(bgc_names))
bgc = {}
for name in bgc_names:
bgc[name] = nc.createVariable('bgc_'+name, dtype('float32').char, ('ocean_time', 'Nbed', 'eta_rho', 'xi_rho'))
for name in bgc_names:
bgc[name].units = 'milimole meter-3'
for name in bgc_names:
bgc[name][:,:,:,:] = bgc_out[name]
for name in bgc_names:
bgc[name].time = 'ocean_time'
return nc
def test1():
grdfile = '/Users/teruhisa/Dropbox/Data/ob500_grd-8.nc'
inifile = '/Users/teruhisa/Dropbox/Data/ob500_ini_fennelP-8.nc'
#bgcfile = 'rst{}.csv'
make_ini_file(grdfile, inifile, biofile=0)
def test2():
grdfile = '/home/okada/Data/ob500_grd-12_h50_2.nc'
inifile = '/home/okada/Data/ob500_ini_grd-12_2.nc'
make_ini_file(grdfile, inifile, biofile=0)
if __name__ == '__main__':
test2() | mit |
phdowling/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
    datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
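# Illustrative sketch (added; not part of the original module): a minimal use of
# the heuristic above on made-up data. Subsampling via n_samples keeps the
# quadratic cost manageable on larger inputs.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 0.3, size=(50, 2)),
                   rng.normal(5.0, 0.3, size=(50, 2))])
    # Tighter point clouds give smaller bandwidth estimates.
    return estimate_bandwidth(X, quantile=0.2, n_samples=60, random_state=0)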
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
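# Illustrative sketch (added; not part of the original module): end-to-end use
# of the functional interface on made-up blobs, with bin seeding enabled to
# reduce the number of seeds.
def _example_mean_shift():
    rng = np.random.RandomState(42)
    X = np.vstack([rng.normal(loc, 0.4, size=(100, 2))
                   for loc in (0.0, 4.0, 8.0)])
    bandwidth = estimate_bandwidth(X, quantile=0.2)
    cluster_centers, labels = mean_shift(X, bandwidth=bandwidth, bin_seeding=True)
    # cluster_centers has one row per discovered blob; labels assigns each
    # sample to the nearest of those centers.
    return cluster_centers, labels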
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
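# Added note (illustration only): with bin_size=1.0 the points [0.1, 0.2],
# [0.2, 0.1], [0.9, 1.1] and [5.0, 5.1] fall into the grid cells (0, 0), (0, 0),
# (1, 1) and (5, 5), so get_bin_seeds returns the three seeds [0., 0.], [1., 1.]
# and [5., 5.] (in no particular order) rather than all four points; on dense
# data this reduction is what makes bin seeding cheaper than seeding from every
# sample.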
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
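# Illustrative sketch (added; not part of the original module): typical use of
# the estimator interface on made-up data; fit() populates cluster_centers_ and
# labels_, and predict() maps new points to the nearest learned center.
def _example_meanshift_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(-3.0, 0.5, size=(75, 2)),
                   rng.normal(3.0, 0.5, size=(75, 2))])
    ms = MeanShift(bin_seeding=True)
    ms.fit(X)
    new_labels = ms.predict([[-3.0, -3.0], [3.0, 3.0]])
    return ms.cluster_centers_, ms.labels_, new_labels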
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/test_analytics.py | 1 | 45480 | from datetime import timedelta
from decimal import Decimal
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
def assert_stat_op_calc(
opname,
alternative,
frame,
has_skipna=True,
check_dtype=True,
check_dates=False,
rtol=1e-5,
atol=1e-8,
skipna_alternative=None,
):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
rtol : float, default 1e-5
Relative tolerance.
atol : float, default 1e-8
Absolute tolerance.
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
expected_warning = FutureWarning if opname in ["mean", "median"] else None
df = DataFrame({"b": date_range("1/1/2001", periods=2)})
with tm.assert_produces_warning(expected_warning):
result = getattr(df, opname)()
assert isinstance(result, Series)
df["a"] = range(len(df))
with tm.assert_produces_warning(expected_warning):
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(
result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol
)
# HACK: win32
tm.assert_series_equal(
result1,
frame.apply(wrapper, axis=1),
check_dtype=False,
rtol=rtol,
atol=atol,
)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(
result0,
frame.apply(skipna_wrapper),
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
)
if opname in ["sum", "prod"]:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(
result1, expected, check_dtype=False, rtol=rtol, atol=atol
)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match="No axis named 2"):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ["sum", "prod"]:
unit = 1 if opname == "prod" else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
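# Added note (illustration only): a typical call checks a DataFrame reduction
# against its NumPy counterpart, for example
#     assert_stat_op_calc("sum", np.sum, frame, skipna_alternative=np.nansum)
# which exercises both axes, skipna handling, dtype checks and the all-NaN case.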
def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(
result1, frame.apply(wrapper, axis=1), check_dtype=False
) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(
result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False
)
# bad axis
with pytest.raises(ValueError, match="No axis named 2"):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == "any":
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(
opname, bool_frame_with_na, float_string_frame, has_bool_only=False
):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed["_bool_"] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics:
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api(
"count", float_frame, float_string_frame, has_numeric_only=True
)
assert_stat_op_api(
"sum", float_frame, float_string_frame, has_numeric_only=True
)
assert_stat_op_api("nunique", float_frame, float_string_frame)
assert_stat_op_api("mean", float_frame, float_string_frame)
assert_stat_op_api("product", float_frame, float_string_frame)
assert_stat_op_api("median", float_frame, float_string_frame)
assert_stat_op_api("min", float_frame, float_string_frame)
assert_stat_op_api("max", float_frame, float_string_frame)
assert_stat_op_api("mad", float_frame, float_string_frame)
assert_stat_op_api("var", float_frame, float_string_frame)
assert_stat_op_api("std", float_frame, float_string_frame)
assert_stat_op_api("sem", float_frame, float_string_frame)
assert_stat_op_api("median", float_frame, float_string_frame)
try:
from scipy.stats import kurtosis, skew # noqa:F401
assert_stat_op_api("skew", float_frame, float_string_frame)
assert_stat_op_api("kurt", float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc(
"nunique",
nunique,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
# GH#32571 check_less_precise is needed on apparently-random
# py37-npdev builds and OSX-PY36-min_version builds
# mixed types (with upcasting happening)
assert_stat_op_calc(
"sum",
np.sum,
mixed_float_frame.astype("float32"),
check_dtype=False,
rtol=1e-3,
)
assert_stat_op_calc(
"sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum
)
assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod
)
assert_stat_op_calc("mad", mad, float_frame_with_na)
assert_stat_op_calc("var", var, float_frame_with_na)
assert_stat_op_calc("std", std, float_frame_with_na)
assert_stat_op_calc("sem", sem, float_frame_with_na)
assert_stat_op_calc(
"count",
count,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
try:
            from scipy.stats import kurtosis, skew  # noqa:F401
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"median", wrapper, int_frame, check_dtype=False, check_dates=True
)
@pytest.mark.parametrize(
"method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]
)
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
"a": [
-0.00049987540199591344,
-0.0016467257772919831,
0.00067695870775883013,
],
"b": [-0, -0, 0.0],
"c": [
0.00031111847529610595,
0.0014902627951905339,
-0.00094099200035979691,
],
}
df1 = DataFrame(data, index=["foo", "bar", "baz"], dtype="O")
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype("f8"), method)(1)
if method in ["sum", "prod"]:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame(
{
"int": [1, 2, 3, 4],
"float": [1.0, 2.0, 3.0, 4.0],
"str": ["a", "b", "c", "d"],
}
)
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context("use_bottleneck", False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame(
{
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
"string_data": ["a", "b", "c", "d", "e"],
}
)
df.reindex(columns=["bool_data", "int_data", "string_data"])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(
test.values, np.array([2, 150, "abcde"], dtype=object)
)
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2}))
tm.assert_series_equal(
df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3})
)
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(
df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})
)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1], "B": [pd.Timestamp("2000", tz=tz)] * 2})
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
expected = pd.Series([1.0], index=["A"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp("2000", tz=tz)] * 2})
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
expected = pd.Series(dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_mean_mixed_string_decimal(self):
# GH 11670
# possible bug when calculating mean of DataFrame?
d = [
{"A": 2, "B": None, "C": Decimal("628.00")},
{"A": 1, "B": None, "C": Decimal("383.00")},
{"A": 3, "B": None, "C": Decimal("651.00")},
{"A": 2, "B": None, "C": Decimal("575.00")},
{"A": 4, "B": None, "C": Decimal("1114.00")},
{"A": 1, "B": "TEST", "C": Decimal("241.00")},
{"A": 2, "B": None, "C": Decimal("572.00")},
{"A": 4, "B": None, "C": Decimal("609.00")},
{"A": 3, "B": None, "C": Decimal("820.00")},
{"A": 5, "B": None, "C": Decimal("1223.00")},
]
df = pd.DataFrame(d)
result = df.mean()
expected = pd.Series([2.7, 681.6], index=["A", "C"])
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize("meth", ["sem", "var", "std"])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
# set one entry to a number in str format
df1.loc[0, "foo"] = "100"
df2 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
# set one entry to a non-number str
df2.loc[0, "foo"] = "a"
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs("bar")
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == "bar"
@pytest.mark.parametrize(
"dropna, expected",
[
(
True,
{
"A": [12],
"B": [10.0],
"C": [1.0],
"D": ["a"],
"E": Categorical(["a"], categories=["a"]),
"F": to_datetime(["2000-1-2"]),
"G": to_timedelta(["1 days"]),
},
),
(
False,
{
"A": [12],
"B": [10.0],
"C": [np.nan],
"D": np.array([np.nan], dtype=object),
"E": Categorical([np.nan], categories=["a"]),
"F": [pd.NaT],
"G": to_timedelta([pd.NaT]),
},
),
(
True,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
"L": to_datetime(["2000-1-2", "NaT", "NaT", "NaT"]),
"M": to_timedelta(["1 days", "nan", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
(
False,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
"L": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
"M": to_timedelta(["nan", "1 days", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
],
)
def test_mode_dropna(self, dropna, expected):
df = DataFrame(
{
"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, "a", np.nan],
"E": Categorical([np.nan, np.nan, "a", np.nan]),
"F": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
"G": to_timedelta(["1 days", "nan", "nan", "nan"]),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(["a", np.nan, "a", np.nan]),
"L": to_datetime(["2000-1-2", "2000-1-2", "NaT", "NaT"]),
"M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
"N": np.arange(4, dtype="int64"),
}
)
result = df[sorted(expected.keys())].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, "a", "a"]})
expected = DataFrame({"A": ["a", np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by="A").reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(
dict(
A=date_range("2012-1-1", periods=3, freq="D"),
B=date_range("2012-1-2", periods=3, freq="D"),
C=Timestamp("20120101") - timedelta(minutes=5, seconds=5),
)
)
diffs = DataFrame(dict(A=df["A"] - df["C"], B=df["A"] - df["B"]))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, "A"]
assert result[1] == diffs.loc[0, "B"]
result = diffs.min(axis=1)
assert (result == diffs.loc[0, "B"]).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, "A"]
assert result[1] == diffs.loc[2, "B"]
result = diffs.max(axis=1)
assert (result == diffs["A"]).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df["A"] - df["C"], B=df["B"] - df["A"]))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed["C"] = "foo"
mixed["D"] = 1
mixed["E"] = 1.0
mixed["F"] = Timestamp("20130101")
# results in an object array
result = mixed.min()
expected = Series(
[
pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
"foo",
1,
1.0,
Timestamp("20130101"),
],
index=mixed.columns,
)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.0], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[["A", "B"]].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[["A", "B"]].min()
expected = Series(
[timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"]
)
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame(
{
"time": date_range("20130102", periods=5),
"time2": date_range("20130105", periods=5),
}
)
df["off1"] = df["time2"] - df["time"]
assert df["off1"].dtype == "timedelta64[ns]"
df["off2"] = df["time"] - df["time2"]
df._consolidate_inplace()
assert df["off1"].dtype == "timedelta64[ns]"
assert df["off2"].dtype == "timedelta64[ns]"
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
def test_sum_prod_nanops(self, method, unit):
idx = ["a", "b", "c"]
df = pd.DataFrame(
{"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}
)
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype="float64")
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype="float64")
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ["a", "b", "c"]
df = pd.DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_sum_mixed_datetime(self):
# GH#30886
df = pd.DataFrame(
{"A": pd.date_range("2000", periods=4), "B": [1, 2, 3, 4]}
).reindex([2, 3, 4])
result = df.sum()
expected = pd.Series({"B": 7.0})
tm.assert_series_equal(result, expected)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test for when we have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame["bool"] = float_frame["A"] > 0
means = float_frame.mean(0)
assert means["bool"] == float_frame["bool"].values.mean()
def test_mean_datetimelike(self):
# GH#24757 check that datetimelike are excluded by default, handled
# correctly with numeric_only=True
df = pd.DataFrame(
{
"A": np.arange(3),
"B": pd.date_range("2016-01-01", periods=3),
"C": pd.timedelta_range("1D", periods=3),
"D": pd.period_range("2016", periods=3, freq="A"),
}
)
result = df.mean(numeric_only=True)
expected = pd.Series({"A": 1.0})
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# in the future datetime columns will be included
result = df.mean()
expected = pd.Series({"A": 1.0, "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
def test_mean_datetimelike_numeric_only_false(self):
df = pd.DataFrame(
{
"A": np.arange(3),
"B": pd.date_range("2016-01-01", periods=3),
"C": pd.timedelta_range("1D", periods=3),
}
)
# datetime(tz) and timedelta work
result = df.mean(numeric_only=False)
expected = pd.Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
# mean of period is not allowed
df["D"] = pd.period_range("2016", periods=3, freq="A")
with pytest.raises(TypeError, match="mean is not implemented for Period"):
df.mean(numeric_only=False)
def test_mean_extensionarray_numeric_only_true(self):
# https://github.com/pandas-dev/pandas/issues/33256
arr = np.random.randint(1000, size=(10, 5))
df = pd.DataFrame(arr, dtype="Int64")
result = df.mean(numeric_only=True)
expected = pd.DataFrame(arr).mean()
tm.assert_series_equal(result, expected)
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=range(1), columns=range(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.iloc[5:10] = np.nan
frame.iloc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)
tm.assert_series_equal(result, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.iloc[5:10] = np.nan
frame.iloc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)
tm.assert_series_equal(result, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize("opname", ["any", "all"])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(
opname, getattr(np, opname), bool_frame_with_na, has_skipna=True
)
assert_bool_op_api(
opname, bool_frame_with_na, float_string_frame, has_bool_only=True
)
def test_any_all_extra(self):
df = DataFrame(
{
"A": [True, False, False],
"B": [True, True, False],
"C": [True, True, True],
},
index=["a", "b", "c"],
)
result = df[["A", "B"]].any(1)
expected = Series([True, True, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df[["A", "B"]].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[["C"]].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [
pd.Timestamp("1960-02-15"),
pd.Timestamp("1960-02-16"),
pd.NaT,
pd.NaT,
]
df = DataFrame({"A": float_data, "B": datetime_data})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame(
{"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]}
)
result = df.all(bool_only=True)
expected = Series(dtype=np.bool_)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True],
}
)
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, data, expected",
[
(np.any, {}, False),
(np.all, {}, True),
(np.any, {"A": []}, False),
(np.all, {"A": []}, True),
(np.any, {"A": [False, False]}, False),
(np.all, {"A": [False, False]}, False),
(np.any, {"A": [True, False]}, True),
(np.all, {"A": [True, False]}, False),
(np.any, {"A": [True, True]}, True),
(np.all, {"A": [True, True]}, True),
(np.any, {"A": [False], "B": [False]}, False),
(np.all, {"A": [False], "B": [False]}, False),
(np.any, {"A": [False, False], "B": [False, True]}, True),
(np.all, {"A": [False, False], "B": [False, True]}, False),
# other types
(np.all, {"A": pd.Series([0.0, 1.0], dtype="float")}, False),
(np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True),
(np.all, {"A": pd.Series([0, 1], dtype=int)}, False),
(np.any, {"A": pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
(np.all, {"A": pd.Series([0, 1], dtype="category")}, False),
(np.any, {"A": pd.Series([0, 1], dtype="category")}, True),
(np.all, {"A": pd.Series([1, 2], dtype="category")}, True),
(np.any, {"A": pd.Series([1, 2], dtype="category")}, True),
# Mix GH#21484
pytest.param(
np.all,
{
"A": pd.Series([10, 20], dtype="M8[ns]"),
"B": pd.Series([10, 20], dtype="m8[ns]"),
},
True,
),
],
)
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=["a", "b"])).item()
assert result is True
result = np.any(DataFrame(columns=["a", "b"])).item()
assert result is False
@pytest.mark.parametrize("method", ["any", "all"])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product(
[["A", "B"], ["a", "b"]], names=["out", "in"]
),
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level="out")
# ---------------------------------------------------------------------
# Matrix-like
def test_matmul(self):
# matmul test is for GH 10259
a = DataFrame(
np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
)
b = DataFrame(
np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"]
)
# DataFrame @ DataFrame
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(a, b.one)
expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
# np.array @ DataFrame
result = operator.matmul(a.values, b)
assert isinstance(result, DataFrame)
assert result.columns.equals(b.columns)
assert result.index.equals(pd.Index(range(3)))
expected = np.dot(a.values, b.values)
tm.assert_almost_equal(result.values, expected)
# nested list @ DataFrame (__rmatmul__)
result = operator.matmul(a.values.tolist(), b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_almost_equal(result.values, expected.values)
# mixed dtype DataFrame @ DataFrame
a["q"] = a.q.round().astype(int)
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# different dtypes DataFrame @ DataFrame
a = a.astype(int)
result = operator.matmul(a, b)
expected = DataFrame(
np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
)
tm.assert_frame_equal(result, expected)
# unaligned
df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4))
df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3])
with pytest.raises(ValueError, match="aligned"):
operator.matmul(df, df2)
# ---------------------------------------------------------------------
# Unsorted
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip(lower=s, axis=0)
for op in ["lt", "le", "gt", "ge", "eq", "ne"]:
getattr(df, op)(s_nan, axis=0)
class TestDataFrameReductions:
def test_min_max_dt64_with_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_min_max_dt64_api_consistency_with_NaT(self):
# Calling the following min/max functions returned an error for DataFrames but
# returned NaT for Series. These tests check that the API is consistent in
# min/max calls on empty Series/DataFrames. See GH:33704 for more
# information
df = pd.DataFrame(dict(x=pd.to_datetime([])))
expected_dt_series = pd.Series(pd.to_datetime([]))
# check axis 0
assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)
assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)
# check axis 1
tm.assert_series_equal(df.min(axis=1), expected_dt_series)
tm.assert_series_equal(df.max(axis=1), expected_dt_series)
def test_min_max_dt64_api_consistency_empty_df(self):
# check DataFrame/Series api consistency when calling min/max on an empty
# DataFrame/Series.
df = pd.DataFrame(dict(x=[]))
expected_float_series = pd.Series([], dtype=float)
# check axis 0
assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())
assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())
# check axis 1
tm.assert_series_equal(df.min(axis=1), expected_float_series)
tm.assert_series_equal(df.max(axis=1), expected_float_series)
@pytest.mark.parametrize(
"initial",
["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"], # Non-UTC timezone
)
@pytest.mark.parametrize("method", ["min", "max"])
def test_preserve_timezone(self, initial: str, method):
# GH 28552
initial_dt = pd.to_datetime(initial)
expected = Series([initial_dt])
df = DataFrame([expected])
result = getattr(df, method)(axis=1)
tm.assert_series_equal(result, expected)
def test_mixed_frame_with_integer_sum():
# https://github.com/pandas-dev/pandas/issues/34520
df = pd.DataFrame([["a", 1]], columns=list("ab"))
df = df.astype({"b": "Int64"})
result = df.sum()
expected = pd.Series(["a", 1], index=["a", "b"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numeric_only", [True, False, None])
@pytest.mark.parametrize("method", ["min", "max"])
def test_minmax_extensionarray(method, numeric_only):
# https://github.com/pandas-dev/pandas/issues/32651
int64_info = np.iinfo("int64")
ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())
df = DataFrame({"Int64": ser})
result = getattr(df, method)(numeric_only=numeric_only)
expected = Series(
[getattr(int64_info, method)], index=pd.Index(["Int64"], dtype="object")
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
nicocardiel/pyemir | emirdrp/recipes/image/checks.py | 3 | 8941 | #
# Copyright 2011-2018 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# PyEmir is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyEmir is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyEmir. If not, see <http://www.gnu.org/licenses/>.
#
"""Routines shared by image mode recipes"""
import logging
import operator
import six
import numpy
from astropy.io import fits
import sep
import matplotlib.pyplot as plt
from emirdrp.util.sextractor import SExtractor
from .naming import name_skysub_proc
_logger = logging.getLogger(__name__)
# Actions to carry over images when checking the flux
# of the objects in different images
def warn_action(img):
_logger.warning('Image %s has low flux in objects', img.baselabel)
img.valid_science = True
def reject_action(img):
img.valid_science = False
_logger.info('Image %s rejected, has low flux in objects', img.baselabel)
pass
def default_action(img):
_logger.info(
'Image %s accepted, has correct flux in objects', img.baselabel)
img.valid_science = True
# Actions
_dactions = {'warn': warn_action,
'reject': reject_action, 'default': default_action}
def check_photometry(frames, sf_data, seeing_fwhm, step=0,
border=300, extinction=0.0,
check_photometry_levels=[0.5, 0.8],
check_photometry_actions=['warn', 'warn', 'default'],
figure=None):
# Check photometry of few objects
weigthmap = 'weights4rms.fits'
wmap = numpy.ones_like(sf_data[0], dtype='bool')
# Center of the image
wmap[border:-border, border:-border] = 0
# fits.writeto(weigthmap, wmap.astype('uint8'), overwrite=True)
basename = 'result_i%0d.fits' % (step)
data_res = fits.getdata(basename)
data_res = data_res.byteswap().newbyteorder()
bkg = sep.Background(data_res)
data_sub = data_res - bkg
_logger.info('Running source extraction in %s', basename)
objects = sep.extract(data_sub, 1.5, err=bkg.globalrms, mask=wmap)
# if seeing_fwhm is not None:
# sex.config['SEEING_FWHM'] = seeing_fwhm * sex.config['PIXEL_SCALE']
# sex.config['PARAMETERS_LIST'].append('CLASS_STAR')
# sex.config['CATALOG_NAME'] = 'master-catalogue-i%01d.cat' % step
LIMIT_AREA = 5000
idx_small = objects['npix'] < LIMIT_AREA
objects_small = objects[idx_small]
NKEEP = 15
idx_flux = objects_small['flux'].argsort()
objects_nth = objects_small[idx_flux][-NKEEP:]
# set of indices of the N first objects
fluxes = []
errors = []
times = []
airmasses = []
for idx, frame in enumerate(frames):
imagename = name_skysub_proc(frame.baselabel, step)
#sex.config['CATALOG_NAME'] = ('catalogue-%s-i%01d.cat' %
# (frame.baselabel, step))
# Launch SExtractor on a FITS file
# in double image mode
_logger.info('Running sextractor in %s', imagename)
with fits.open(imagename) as hdul:
header = hdul[0].header
airmasses.append(header['airmass'])
times.append(header['tstamp'])
data_i = hdul[0].data
data_i = data_i.byteswap().newbyteorder()
bkg_i = sep.Background(data_i)
data_sub_i = data_i - bkg_i
# objects_i = sep.extract(data_sub_i, 1.5, err=bkg_i.globalrms, mask=wmap)
flux_i, fluxerr_i, flag_i = sep.sum_circle(data_sub_i,
objects_nth['x'], objects_nth['y'],
3.0, err=bkg_i.globalrms)
# Extinction correction
excor = pow(10, -0.4 * frame.airmass * extinction)
flux_i = excor * flux_i
fluxerr_i = excor * fluxerr_i
fluxes.append(flux_i)
errors.append(fluxerr_i)
fluxes_a = numpy.array(fluxes)
errors_a = numpy.array(errors)
fluxes_n = fluxes_a / fluxes_a[0]
errors_a = errors_a / fluxes_a[0] # sigma
w = 1.0 / (errors_a) ** 2
# weighted mean of the flux values
wdata = numpy.average(fluxes_n, axis=1, weights=w)
wsigma = 1 / numpy.sqrt(w.sum(axis=1))
levels = check_photometry_levels
actions = check_photometry_actions
x = list(six.moves.range(len(frames)))
vals, (_, sigma) = check_photometry_categorize(
x, wdata, levels, tags=actions)
# n sigma level to plt
nsig = 3
if True:
figure = plt.figure()
ax = figure.add_subplot(111)
plot_photometry_check(ax, vals, wsigma, check_photometry_levels, nsig * sigma)
plt.savefig('figure-relative-flux_i%01d.png' % step)
for x, _, t in vals:
try:
action = _dactions[t]
except KeyError:
_logger.warning('Action named %s not recognized, ignoring', t)
action = default_action
for p in x:
action(frames[p])
def check_photometry_categorize(x, y, levels, tags=None):
'''Put every point in its category.
levels must be sorted.'''
x = numpy.asarray(x)
y = numpy.asarray(y)
ys = y.copy()
ys.sort()
# Mean of the upper half
m = ys[len(ys) // 2:].mean()
y /= m
m = 1.0
s = ys[len(ys) // 2:].std()
result = []
if tags is None:
tags = list(six.moves.range(len(levels) + 1))
for l, t in zip(levels, tags):
indc = y < l
if indc.any():
x1 = x[indc]
y1 = y[indc]
result.append((x1, y1, t))
x = x[~indc]
y = y[~indc]
else:
result.append((x, y, tags[-1]))
return result, (m, s)
def plot_photometry_check(ax, vals, errors, levels, nsigma):
x = range(len(errors))
ax.set_title('Relative flux of brightest object')
for v, c in zip(vals, ['b', 'r', 'g', 'y']):
ax.scatter(v[0], v[1], c=c)
w = errors[v[0]]
ax.errorbar(v[0], v[1], yerr=w, fmt='none', c=c)
ax.plot([x[0], x[-1]], [1, 1], 'r--')
ax.plot([x[0], x[-1]], [1 - nsigma, 1 - nsigma], 'b--')
for f in levels:
ax.plot([x[0], x[-1]], [f, f], 'g--')
return ax
def check_position(images_info, sf_data, seeing_fwhm, step=0):
# FIXME: this method has to be updated
_logger.info('Checking positions')
# Check position of bright objects
weigthmap = 'weights4rms.fits'
wmap = numpy.zeros_like(sf_data[0])
# Center of the image
border = 300
wmap[border:-border, border:-border] = 1
fits.writeto(weigthmap, wmap.astype('uint8'), overwrite=True)
basename = 'result_i%0d.fits' % (step)
sex = SExtractor()
sex.config['VERBOSE_TYPE'] = 'QUIET'
sex.config['PIXEL_SCALE'] = 1
sex.config['BACK_TYPE'] = 'AUTO'
if seeing_fwhm is not None and seeing_fwhm > 0:
sex.config['SEEING_FWHM'] = seeing_fwhm * sex.config['PIXEL_SCALE']
sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
sex.config['WEIGHT_IMAGE'] = weigthmap
sex.config['PARAMETERS_LIST'].append('FLUX_BEST')
sex.config['PARAMETERS_LIST'].append('FLUXERR_BEST')
sex.config['PARAMETERS_LIST'].append('FWHM_IMAGE')
sex.config['PARAMETERS_LIST'].append('CLASS_STAR')
sex.config['CATALOG_NAME'] = 'master-catalogue-i%01d.cat' % step
_logger.info('Running sextractor in %s', basename)
sex.run('%s,%s' % (basename, basename))
# Sort catalog by flux
catalog = sex.catalog()
catalog = sorted(
catalog, key=operator.itemgetter('FLUX_BEST'), reverse=True)
# set of indices of the N first objects
OBJS_I_KEEP = 10
# master = [(obj['X_IMAGE'], obj['Y_IMAGE'])
# for obj in catalog[:OBJS_I_KEEP]]
for image in images_info:
imagename = name_skysub_proc(image.baselabel, step)
sex.config['CATALOG_NAME'] = ('catalogue-self-%s-i%01d.cat' %
(image.baselabel, step))
# Launch SExtractor on a FITS file
# in double image mode
_logger.info('Running sextractor in %s', imagename)
sex.run(imagename)
catalog = sex.catalog()
# data = [(obj['X_IMAGE'], obj['Y_IMAGE']) for obj in catalog]
# tree = KDTree(data)
# Search 2 neighbors
# dists, _ids = tree.query(master, 2, distance_upper_bound=5)
# for i in dists[:,0]:
# print i
# _logger.info('Mean offset correction for image %s is %f',
# imagename, dists[:,0].mean())
# raw_input('press any key')
| gpl-3.0 |
jvivian/rnaseq-lib | src/rnaseq_lib/graphs/__init__.py | 1 | 1102 | import os
from itertools import combinations
import pandas as pd
def output_gephi(df, node_attrs, output_dir):
"""
Output two CSVs used by Gephi to create graphs:
nodes.csv
edges.csv
:param pd.DataFrame df: Dataframe of input data
:param list(str) node_attrs: List of attributes in DF to create nodes from
:param str output_dir: Path to output directory
"""
# Create nodes
nodes = []
for attr in node_attrs:
nodes.extend(map(lambda x: (x, attr), df[attr].tolist()))
nodes = pd.DataFrame.from_records(nodes, columns=('Label', 'Type'))
# Create edges
edges = set()
for row in df.iterrows():
i, row = row
for j, k in combinations(node_attrs, 2):
edges.add((int(nodes[nodes.Label == row[j]].index[0]), int(nodes[nodes.Label == row[k]].index[0])))
edges = pd.DataFrame.from_records(list(edges), columns=('Source', 'Target'))
# Output
nodes.to_csv(os.path.join(output_dir, 'nodes.csv'), index=True, index_label='Id')
edges.to_csv(os.path.join(output_dir, 'edges.csv'), index=False)
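# Hypothetical usage sketch: the column names, values and output directory below are
# illustrative only and are not taken from this repository.
if __name__ == '__main__':
    demo_df = pd.DataFrame({'drug': ['erlotinib', 'erlotinib', 'sorafenib'],
                            'gene': ['EGFR', 'KRAS', 'BRAF']})
    output_gephi(demo_df, node_attrs=['drug', 'gene'], output_dir='.')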
| mit |
asteca/ASteCA | packages/out/make_D1_plot.py | 1 | 7630 |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import join
from . import add_version_plot
from . import mp_mcmc_cnvrg
from . import tracePlot
from . import prep_plots
from . prep_plots import figsize_x, figsize_y, grid_x, grid_y
def main(npd, pd, clp):
"""
Make D1 block plots.
"""
fig = plt.figure(figsize=(figsize_x, figsize_y))
gs = gridspec.GridSpec(grid_y, grid_x)
fit_pars = clp['isoch_fit_params']
# Title and version for the plot.
m_bf, s_bf = divmod(fit_pars['bf_elapsed'], 60)
h_bf, m_bf = divmod(m_bf, 60)
xf, yf = .02, .999
add_version_plot.main(x_fix=xf, y_fix=yf)
nwalkers, nburn, nsteps, pt_adapt = pd['nwalkers_mcee'],\
pd['nburn_mcee'], fit_pars['N_steps'],\
pd['pt_adapt']
p_str = (
"chains={:.0f}, burn={:.2f}, steps={:.0f},"
" adapt={}").format(nwalkers, nburn, nsteps[-1], pt_adapt)
xt, yt = .5, 1.005
plt.suptitle(
("{} | {:.0f}h{:.0f}m").format(p_str, h_bf, m_bf), x=xt, y=yt,
fontsize=11)
# DEPRECATED May 2020
# if pd['best_fit_algor'] == 'boot+GA':
# m_btsr, s_btsr = divmod(isoch_fit_params['btstrp_t'], 60)
# h_btsr, m_btsr = divmod(m_btsr, 60)
# p_str = (
# r"$N_{{pop}}={},\,N_{{gen}}={},\,N_{{btstrp}}={}$ "
# "({:.0f}h{:.0f}m)").format(
# pd['N_pop'], isoch_fit_params['OF_steps'],
# isoch_fit_params['N_bootstrap'], h_btsr, m_btsr)
# if isoch_fit_params['N_bootstrap'] == 0:
# xt = 0.83
# xf, yf = .67, 1.01
# elif pd['best_fit_algor'] == 'ptemcee':
# elif pd['best_fit_algor'] == 'emcee':
# nwalkers, nburn, nsteps = pd['nwalkers_mcee'],\
# pd['nburn_mcee'], isoch_fit_params['N_steps']
# p_str = (
# "[{}] chains={:.0f}, burn={:.2f}, steps={:.0f} ").format(
# ' '.join(pd['emcee_moves']), nwalkers, nburn, nsteps[-1])
# Best fitting process plots.
# if pd['best_fit_algor'] == 'boot+GA':
# if isoch_fit_params['N_bootstrap'] > 2:
# pos0 = (0, 2, 4, 8)
# pos1 = (
# (0, 2, 2, 4), (2, 4, 4, 6), (4, 6, 6, 8),
# (6, 8, 8, 10), (8, 10, 10, 12), (6, 8, 10, 12))
# min_max_p = prep_plots.param_ranges(
# pd['best_fit_algor'], pd['fundam_params'],
# isoch_fit_params['varIdxs'],
# isoch_fit_params['params_boot'])
# sharedPlots(pd, npd, isoch_fit_params, gs, min_max_p)
# else:
# pos0 = (0, 2, 8, 12)
# pos1 = (
# (2, 4, 8, 10), (2, 4, 10, 12), (4, 6, 8, 10),
# (4, 6, 10, 12), (6, 8, 8, 10), (6, 8, 10, 12))
# min_max_p = prep_plots.param_ranges(
# pd['best_fit_algor'], pd['fundam_params'])
# args = [
# # pl_GA_lkl: Likelihood evolution for the GA.
# gs, pos0, isoch_fit_params['lkl_best'],
# isoch_fit_params['lkl_mean'], isoch_fit_params['OF_models'],
# isoch_fit_params['new_bs_indx'], pd['fit_diff'],
# pd['cross_prob'], pd['cross_sel'], pd['mut_prob'],
# pd['N_el'], pd['N_ei'], pd['N_es']
# ]
# mp_best_fit1_GA.plot(0, *args)
# arglist = [
# # pl_lkl_scatt: Parameter likelihood density plot.
# [gs, pos1, r'$z$', min_max_p, isoch_fit_params['map_sol'],
# isoch_fit_errors, isoch_fit_params['models_GA'],
# isoch_fit_params['lkls_GA']],
# [gs, pos1, r'$log(age)$', min_max_p,
# isoch_fit_params['map_sol'], isoch_fit_errors,
# isoch_fit_params['models_GA'], isoch_fit_params['lkls_GA']],
# [gs, pos1, r'$E_{{(B-V)}}$', min_max_p,
# isoch_fit_params['map_sol'], isoch_fit_errors,
# isoch_fit_params['models_GA'], isoch_fit_params['lkls_GA']],
# [gs, pos1, r'$(m-M)_o$', min_max_p,
# isoch_fit_params['map_sol'], isoch_fit_errors,
# isoch_fit_params['models_GA'], isoch_fit_params['lkls_GA']],
# [gs, pos1, r'$M\,(M_{{\odot}})$', min_max_p,
# isoch_fit_params['map_sol'], isoch_fit_errors,
# isoch_fit_params['models_GA'], isoch_fit_params['lkls_GA']],
# [gs, pos1, r'$b_{{frac}}$', min_max_p,
# isoch_fit_params['map_sol'], isoch_fit_errors,
# isoch_fit_params['models_GA'], isoch_fit_params['lkls_GA']]
# ]
# for n, args in enumerate(arglist, 1):
# mp_best_fit1_GA.plot(n, *args)
# elif pd['best_fit_algor'] in ('ptemcee', 'emcee'): # 'abc'
# Trace plots
min_max_p = prep_plots.param_ranges(
pd['fundam_params'], fit_pars['varIdxs'],
fit_pars['pars_chains'])
trace = fit_pars['mcmc_trace']
best_sol = fit_pars['mean_sol']
traceplot_args = (
fit_pars['acorr_t'], fit_pars['med_at_c'], fit_pars['mcmc_ess'])
post_trace, pre_trace = fit_pars['pars_chains'], fit_pars['pars_chains_bi']
# pl_param_chain: Parameters sampler chains.
par_list = ['metal', 'age', 'ext', 'dist', 'mass', 'binar']
for p in par_list:
args = [
p, gs, best_sol, min_max_p, traceplot_args, trace,
fit_pars['varIdxs'], post_trace, pre_trace]
tracePlot.plot(0, *args)
# Parallel Coordinates plot
# mp_mcmc_cnvrg.plot(2, *args)
# pl_MAP_lkl: Parameters half of pdfs.
args = [
gs, fit_pars['N_steps'], fit_pars['prob_mean'],
fit_pars['map_lkl'], fit_pars['map_lkl_final']]
mp_mcmc_cnvrg.plot(0, *args)
# if pd['best_fit_algor'] == 'ptemcee':
# pl_betas: Betas vs steps.
args = [gs, fit_pars['Tmax'], fit_pars['N_steps'], fit_pars['betas_pt']]
mp_mcmc_cnvrg.plot(2, *args)
# pl_Tswaps: Tswaps AFs vs steps.
args = [gs, fit_pars['N_steps'], fit_pars['tswaps_afs']]
mp_mcmc_cnvrg.plot(3, *args)
# pl_MAF: Parameters evolution of MAF.
maf_steps = fit_pars['maf_allT']
# if pd['best_fit_algor'] == 'ptemcee':
# elif pd['best_fit_algor'] == 'emcee':
# maf_steps = fit_pars['maf_steps']
args = [gs, fit_pars['N_steps'], maf_steps]
mp_mcmc_cnvrg.plot(1, *args)
# pl_tau
args = [gs, fit_pars['N_steps'], fit_pars['tau_autocorr']]
mp_mcmc_cnvrg.plot(4, *args)
# TODO re-implement when/if code is fixed
# # pl_mESS
# args = [
# 'mESS', gs, fit_pars['mESS'],
# fit_pars['minESS'],
# fit_pars['mESS_epsilon']]
# mp_mcmc_cnvrg.plot(7, *args)
# pl_lags
args = [gs, fit_pars['varIdxs'], fit_pars['acorr_function']]
mp_mcmc_cnvrg.plot(5, *args)
# pl_GW
args = [gs, fit_pars['varIdxs'], fit_pars['geweke_z']]
mp_mcmc_cnvrg.plot(6, *args)
# pl_tau_histo
args = [gs, fit_pars['all_taus']]
mp_mcmc_cnvrg.plot(7, *args)
# Generate output file.
fig.tight_layout()
plt.savefig(join(
npd['output_subdir'], str(npd['clust_name']) + '_D1_'
+ pd['best_fit_algor'] + npd['ext']))
# Close to release memory.
plt.clf()
plt.close("all")
# DEPRECATED May 2020
# def sharedPlots(pd, npd, isoch_fit_params, gs, min_max_p):
# """
# """
# # if pd['best_fit_algor'] == 'boot+GA':
# # trace = isoch_fit_params['params_boot']
# # msol = 'ML'
# # best_sol = isoch_fit_params['map_sol']
# # traceplot_args = []
# # post_trace, pre_trace = isoch_fit_params['params_boot'], None
# # elif pd['best_fit_algor'] in ('ptemcee', 'emcee'):
| gpl-3.0 |
mugizico/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance; that is,
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
jakirkham/bokeh | examples/plotting/file/custom_tooltip.py | 3 | 1902 | import pandas as pd
from bokeh.plotting import figure, show
from bokeh.sampledata.periodic_table import elements
elements = elements.copy()
elements = elements[elements.group != "-"]
elements.sort_values('metal', inplace=True)
colormap = {
"alkali metal" : "#a6cee3",
"alkaline earth metal" : "#1f78b4",
"halogen" : "#fdbf6f",
"metal" : "#b2df8a",
"metalloid" : "#33a02c",
"noble gas" : "#bbbb88",
"nonmetal" : "#baa2a6",
"transition metal" : "#e08e79",
}
data=dict(
atomic_number=elements["atomic number"],
sym=elements["symbol"],
name=elements["name"],
atomic_mass = pd.to_numeric(elements['atomic mass'], errors="coerce"),
density=elements['density'],
metal=[x.title() for x in elements["metal"]],
type_color=[colormap[x] for x in elements["metal"]]
)
mass_format = '{0.00}'
TOOLTIPS = """
<div style="width: 62px; height: 62px; opacity: .8; padding: 5px; background-color: @type_color;>
<h1 style="margin: 0; font-size: 12px;"> @atomic_number </h1>
<h1 style="margin: 0; font-size: 24px;"><strong> @sym </strong></h1>
<p style=" margin: 0; font-size: 8px;"><strong> @name </strong></p>
<p style="margin: 0; font-size: 8px;"> @atomic_mass{mass_format} </p>
</div>
""".format(mass_format=mass_format)
p = figure(plot_width=900, plot_height=450, tooltips=TOOLTIPS, title='Densities by Atomic Mass')
p.background_fill_color = "#fafafa"
p.circle('atomic_mass', 'density', size=12, source=data, color='type_color',
line_color="black", legend='metal', alpha=0.9)
p.legend.glyph_width = 30
p.legend.glyph_height = 30
p.xaxis.axis_label= 'Atomic Mass'
p.yaxis.axis_label= 'Density'
p.xgrid.grid_line_color = None
p.toolbar_location = None
l = p.legend[0]
l.plot = None
p.add_layout(l, 'right')
l.border_line_color = None
show(p)
| bsd-3-clause |
annahs/atmos_research | NC_size_distrs_with_variable_coating_bin_at_end_avg_flights.py | 1 | 17996 | from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
cloud_droplet_conc = 0.5
min_coat = 0 #assumed minimum coating thickness for particles with LEO failure or outside of detection range = 0
max_coat = 100 #assumed maximum coating thickness for particles with LEO failure or outside of detection range = 100
#distr parameters
min_BC_VED = 85
max_BC_VED = 220
min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
fit_bin_min = 50
fit_bin_max = 1000
bin_incr = 10
master_bin_file = 'UHSAS_Polar6_20150408_R1_V2.ict'
#master_bin_file = 'UHSAS_Polar6_20150413_R1_V1.ict'
flight_times = {
#'science 1' : [datetime(2015,4,5,9,0), datetime(2015,4,5,14,0) ,''],
#'ferry 1' : [datetime(2015,4,6,9,0), datetime(2015,4,6,11,0) ,'UHSAS_Polar6_20150406_R1_V1.ict'],
#'ferry 2' : [datetime(2015,4,6,15,0), datetime(2015,4,6,18,0) ,'UHSAS_Polar6_20150406_R1_V2.ict'],
'science 2' : [datetime(2015,4,7,16,0), datetime(2015,4,7,21,0) ,'UHSAS_Polar6_20150407_R1_V1.ict'],
'science 3' : [datetime(2015,4,8,13,0), datetime(2015,4,8,17,0) ,'UHSAS_Polar6_20150408_R1_V1.ict'],
'science 4' : [datetime(2015,4,8,17,30),datetime(2015,4,8,22,0) ,'UHSAS_Polar6_20150408_R1_V2.ict'],
#'science 5' : [datetime(2015,4,9,13,30),datetime(2015,4,9,18,0) ,'UHSAS_Polar6_20150409_R1_V1.ict'],
#'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),'UHSAS_Polar6_20150410_R1_V1.ict'],
'science 6' : [datetime(2015,4,11,15,0),datetime(2015,4,11,22,0),'UHSAS_Polar6_20150411_R1_V1.ict'],
'science 7' : [datetime(2015,4,13,15,0),datetime(2015,4,13,21,0),'UHSAS_Polar6_20150413_R1_V1.ict'],
#'science 8' : [datetime(2015,4,20,15,0),datetime(2015,4,20,20,0),'UHSAS_Polar6_20150420_R1_V1.ict'],
#'science 9' : [datetime(2015,4,20,21,0),datetime(2015,4,21,2,0) ,'UHSAS_Polar6_20150420_R1_V2.ict'],
#'science 10': [datetime(2015,4,21,16,8),datetime(2015,4,21,16,18),'UHSAS_Polar6_20150421_R1_V1.ict'], ###
}
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def myRound(x, base):
return int(base * round(float(x)/base))
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
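# Minimal fitting sketch (hypothetical bin midpoints and concentrations, for
# illustration only): fit_distr() below fits this same lognormal form to the binned,
# normalized number concentrations with scipy's curve_fit; p0 is (amplitude A,
# log-width w, centre xc). This helper is not called anywhere in the script.
def _example_lognorm_fit():
    example_bin_mids = np.array([70., 90., 110., 140., 180., 220.])
    example_norm_conc = np.array([300., 900., 1400., 1100., 500., 150.])
    popt, pcov = curve_fit(lognorm, example_bin_mids, example_norm_conc, p0=(2000, 0.6, 110))
    return popt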
def getUHSASBins(UHSAS_file):
#UHSAS_file = 'UHSAS_Polar6_20150413_R1_V1.ict'
os.chdir('C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/UHSAS/UHSAS-R1/')
with open(UHSAS_file, 'r') as f:
file_date = UHSAS_file[13:21]
date = datetime.strptime(file_date, '%Y%m%d')
##get bin limits
i=0
while i < 9: #indep_var_number is always on line 10
f.readline()
i+=1
indep_var_number = float(f.readline())
i=0
while i < (indep_var_number + 11): #check that 11 is right for each set of files
f.readline()
i+=1
bin_LL_line = (f.readline()).split()
f.readline() #skip this line
bin_UL_line = (f.readline()).split()
##create bins dict
bin_dict = {}
i=0
for LL_limit in bin_LL_line:
bin_dict[i] = [float(LL_limit),float(bin_UL_line[i]),[]]
i+=1
return bin_dict
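# Shape of the returned dict (bin limits shown are illustrative, not from a real
# UHSAS file): {bin_index: [bin_lower_limit, bin_upper_limit, []], ...}, e.g.
# {0: [65.0, 67.3, []], 1: [67.3, 69.7, []], ...}. The empty list in each entry is
# filled with particle diameters later, in make_binned_list().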
def make_bin_dict():
new_dict = {}
print fit_bin_min,fit_bin_max,bin_incr
for bin in range(fit_bin_min,fit_bin_max,bin_incr):
new_dict[bin] = [bin,(bin+bin_incr),[]]
return new_dict
def sampling_time_in_flight(start_time,end_time):
cursor.execute(('''SELECT ftd.UNIX_UTC_ts
FROM polar6_flight_track_details ftd
JOIN polar6_fssp_cloud_data fssp on ftd.fssd_id = fssp.id
WHERE ftd.UNIX_UTC_ts >= %s and ftd.UNIX_UTC_ts < %s and fssp.FSSPTotalConc <=%s ORDER BY ftd.UNIX_UTC_ts'''),
(start_time,end_time,cloud_droplet_conc))
alt_data = cursor.fetchall()
first_line = True
temp_list = []
interval_list = []
for line in alt_data:
current_ts = line[0]
if first_line == True:
prior_ts = current_ts
first_line = False
if (current_ts - prior_ts) <= 1:
temp_list.append(current_ts)
prior_ts = current_ts
else:
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
temp_list = []
temp_list.append(current_ts)
prior_ts = current_ts
#add in last interval
if len(temp_list):
time_interval = (temp_list[-1]-temp_list[0]) #in sec
interval_list.append(time_interval)
total_sampling_time = np.sum(interval_list)
else:
total_sampling_time = 0
return total_sampling_time
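# Illustration only -- a hypothetical helper (not used elsewhere) showing the grouping
# rule above on a plain list of integer-second timestamps: consecutive timestamps
# (gap <= 1 s) form one interval and each interval contributes (last - first) seconds,
# e.g. [0, 1, 2, 3, 10, 11, 12] -> 3 s + 2 s = 5 s.
def _example_total_contiguous_time(timestamps):
    total, start, prev = 0, None, None
    for ts in timestamps:
        if prev is not None and ts - prev > 1:
            total += prev - start
            start = ts
        if prev is None:
            start = ts
        prev = ts
    if prev is not None:
        total += prev - start
    return total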
def assemble_bin_data(retrieved_records, flight_data):
#parse each row in results
for row in retrieved_records:
mass = row[0]
coat = row[1]
LEO_amp = row[2]
sample_flow = row[3]
temperature = row[4] + 273.15 #convert to Kelvin
pressure = row[5]
VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7 #volume-equivalent diameter in nm, assuming rBC density of 1.8 g/cm3
STP_correction_factor = (101325/pressure)*(temperature/273.15)
#succesful LEO fitting
if (0 <= LEO_amp < 45000) and coat != None: #failed LEO fits give neg LEO amps (e.g -2)
#if coat >= 0:
coat_min = coat
coat_max = coat
#failed LEO fitting or neg coating, we calc a max and min case for these
else:
coat_min = min_coat
coat_max = max_coat
flight_data['core_dia'].append(VED)
flight_data['min_coated_dia'].append(VED+2*coat_min)
flight_data['max_coated_dia'].append(VED+2*coat_max)
flight_data['STP_correction_factor'].append(STP_correction_factor)
flight_data['sample_flow'].append(sample_flow)
return flight_data
def make_binned_list(raw_dia_list, bin_dict,total_vol_sccm):
for dia in raw_dia_list:
for point in bin_dict:
LL_bin = bin_dict[point][0]
UL_bin = bin_dict[point][1]
if (LL_bin <= dia < UL_bin):
bin_dict[point][2].append(dia)
dia_list = []
area = 0
for point in bin_dict:
bin_ll = bin_dict[point][0]
bin_ul = bin_dict[point][1]
bin_mid = bin_ll + (bin_ul-bin_ll)/2
number = len(bin_dict[point][2])
number_conc = number/total_vol_sccm #in #/cm3
norm_number_conc = number_conc/(math.log((bin_ul))-math.log(bin_ll))
area = area+norm_number_conc
dia_list.append([bin_ll,bin_ul,bin_mid,number,number_conc,norm_number_conc])
dia_list.sort()
print 'area',area
return dia_list
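# Each row of the returned (sorted) list has the layout
# [bin_lower, bin_upper, bin_midpoint, count, number_conc, number_conc/dlogD],
# which is what fit_distr() (row[2], row[5]) and plot_distrs() assume downstream.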
def fit_distr(core_dia_list, instr):
#don't fit to bins with no data, this messes with the fit
if instr == 'SP2':
core_bin_midpoints = []
core_number_conc_norm = []
for row in core_dia_list:
if min_BC_VED <= row[2] < max_BC_VED:
core_bin_midpoints.append(row[2])
core_number_conc_norm.append(row[5])
if instr == 'UHSAS':
core_bin_midpoints = []
core_number_conc_norm = []
for row in core_dia_list:
if 70 <= row[2] < 700:
core_bin_midpoints.append(row[2])
core_number_conc_norm.append(row[5])
#core
try:
popt, pcov = curve_fit(lognorm, np.array(core_bin_midpoints), np.array(core_number_conc_norm), p0=(2000,0.6,110))
except Exception,e:
popt = np.nan
print str(e)
print 'number fit failure'
return popt
def calc_bin_values(flight_data,UNIX_start_time,UNIX_end_time,UHSAS_file):
mean_sample_flow = np.nanmean(flight_data['sample_flow']) #in cm3/min
mean_STP_correction_factor = np.nanmean(flight_data['STP_correction_factor']) #no units
total_samping_time = 0
for flight in flight_times:
print flight
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UHSAS_file = flight_times[flight][2]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
total_samping_time = total_samping_time + sampling_time_in_flight(UNIX_start_time,UNIX_end_time)
total_vol = mean_sample_flow*total_samping_time/60 #divide by 60 to convert sampling time from seconds to minutes; result is in cc
total_vol_sccm = total_vol/mean_STP_correction_factor #correct the sampled volume to STP; result is in standard cc
print 'stp',mean_STP_correction_factor
print total_vol, total_vol_sccm
#cores
binned_core_dias = getUHSASBins(master_bin_file)
#binned_core_dias = make_bin_dict()
core_diameter_list = make_binned_list(flight_data['core_dia'],binned_core_dias,total_vol_sccm)
core_dia_fit_func = fit_distr(core_diameter_list,'SP2')
area = 0
fit_core_dias_list = []
for point in binned_core_dias:
bin_ll = binned_core_dias[point][0]
bin_ul = binned_core_dias[point][1]
bin_mid = bin_ll + (bin_ul-bin_ll)/2
fit_core_number_conc_norm = lognorm(bin_mid, core_dia_fit_func[0], core_dia_fit_func[1], core_dia_fit_func[2])
fit_core_number_conc = fit_core_number_conc_norm*(math.log((bin_ul))-math.log(bin_ll))
fit_core_number = fit_core_number_conc*total_vol_sccm
fit_core_dias_list.append([bin_ll,bin_ul,bin_mid,fit_core_number,fit_core_number_conc,fit_core_number_conc_norm])
area+= fit_core_number_conc_norm
if (bin_ul < min_BC_VED) or (max_BC_VED < bin_ll):
i=0
while i < fit_core_number:
flight_data['min_coated_dia'].append(bin_mid)
flight_data['max_coated_dia'].append(bin_mid+2*max_coat)
i+=1
print 'fit area',area
fit_core_dias_list.sort()
#min coats
binned_min_coat_dias = getUHSASBins(master_bin_file)
#binned_min_coat_dias = make_bin_dict()
min_coated_dia_list = make_binned_list(flight_data['min_coated_dia'],binned_min_coat_dias,total_vol_sccm)
#max coats
binned_max_coat_dias = getUHSASBins(master_bin_file)
#binned_max_coat_dias = make_bin_dict()
max_coated_dia_list = make_binned_list(flight_data['max_coated_dia'],binned_max_coat_dias,total_vol_sccm)
return [core_diameter_list,min_coated_dia_list,max_coated_dia_list,fit_core_dias_list]
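# The list returned above holds four binned size distributions, in this order:
# [0] bare rBC cores, [1] cores + minimum assumed coating, [2] cores + maximum
# assumed coating, [3] lognormal fit to the core distribution. plot_distrs() and
# write_files() below index it in exactly that order.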
def plot_distrs(binned_data,UHSAS_list,UHSAS_fitted_list):
core_bin_mids = [row[2] for row in binned_data[0]]
core_norm_conc = [row[5] for row in binned_data[0]]
min_coat_bin_mids = [row[2] for row in binned_data[1]]
min_coat_norm_conc = [row[5] for row in binned_data[1]]
max_coat_bin_mids = [row[2] for row in binned_data[2]]
max_coat_norm_conc = [row[5] for row in binned_data[2]]
core_fit_bin_mids = [row[2] for row in binned_data[3]]
core_fit_norm_conc = [row[5] for row in binned_data[3]]
uhsas_bin_mids = [row[0] for row in UHSAS_list]
uhsas_norm_conc = [row[1] for row in UHSAS_list]
uhsas_fitted_bin_mids = [row[2] for row in UHSAS_fitted_list]
uhsas_fitted_norm_conc = [row[5] for row in UHSAS_fitted_list]
print sum(core_fit_norm_conc)
print sum(uhsas_fitted_norm_conc)
i=0
min_ratios = []
max_ratios = []
for conc in min_coat_norm_conc:
min_ratio = conc/uhsas_norm_conc[i]
max_ratio = max_coat_norm_conc[i]/uhsas_norm_conc[i]
min_ratios.append(min_ratio)
max_ratios.append(max_ratio)
i+=1
print np.nanmean(min_ratios)
print np.nanmean(max_ratios)
#plots
ticks = [50,60,70,80,100,120,160,200,300,400,600,800]
fig = plt.figure(figsize= (12,10))
ax1 = fig.add_subplot(111)
ax1.plot(uhsas_bin_mids,uhsas_norm_conc, color = 'g',marker='s', label = 'total particles (UHSAS)')
ax1.scatter(core_bin_mids,core_norm_conc, color = 'k',marker='o', label='rBC cores only')
ax1.plot(core_fit_bin_mids,core_fit_norm_conc, color = 'grey',marker='',linestyle = '--', label = 'rBC core fit')
ax1.plot(min_coat_bin_mids,min_coat_norm_conc, color = 'b',marker='<', label='rBC cores + min coating')
ax1.plot(max_coat_bin_mids,max_coat_norm_conc, color = 'r',marker='>', label='rBC cores + max coating')
#ax1.plot(uhsas_fitted_bin_mids,uhsas_fitted_norm_conc, color = 'g',marker='',linestyle = '--', label = 'UHSAS fit')
#ax1.plot(uhsas_bin_mids,min_ratios, color = 'c',marker='o', label = 'min/UHSAS')
#ax1.plot(uhsas_bin_mids,max_ratios, color = 'orange',marker='o', label = 'max/UHSAS')
ax1.set_xlabel('VED (nm)')
ax1.set_ylabel('dN/dlog(VED)')
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
ax1.xaxis.set_major_locator(plt.FixedLocator(ticks))
ax1.set_ylim(1,3500)
ax1.set_xlim(50,1000)
plt.legend()
plt.show()
ticks = [50,60,70,80,100,120,160,200,300,400,600,800]
fig = plt.figure(figsize= (12,10))
ax2 = fig.add_subplot(111)
ax2.plot(uhsas_bin_mids,min_ratios, color = 'b',marker='o',linestyle = '-', label = 'rBC cores with min coating/UHSAS')
ax2.plot(uhsas_bin_mids,max_ratios, color = 'r',marker='s',linestyle = '-', label = 'rBC cores with max coating/UHSAS')
ax2.set_xlabel('VED (nm)')
ax2.set_ylabel('fraction of total particles containing rBC')
ax2.set_xscale('log')
#ax2.set_yscale('log')
ax2.xaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
ax2.xaxis.set_major_locator(plt.FixedLocator(ticks))
ax2.set_ylim(0,1)
ax2.set_xlim(50,1000)
plt.legend()
plt.show()
def write_files(binned_data):
#cores
file = open('C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/coating data/SP2 core data-'+flight+'.txt', 'w')
file.write('bin_LL\tbin_UL\tbin_mid\tbin_number\tbin_number_conc\tbin_number_conc_norm' + '\n')
for row in binned_data[0]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
#min_coats
file = open('C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/coating data/SP2 min coat data-'+flight+'.txt', 'w')
file.write('bin_LL\tbin_UL\tbin_mid\tbin_number\tbin_number_conc\tbin_number_conc_norm' + '\n')
for row in binned_data[1]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
#max_coats
file = open('C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/coating data/SP2 max coat data-'+flight+'.txt', 'w')
file.write('bin_LL\tbin_UL\tbin_mid\tbin_number\tbin_number_conc\tbin_number_conc_norm' + '\n')
for row in binned_data[2]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
#fit cores
file = open('C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/coating data/SP2 core fit data-'+flight+'.txt', 'w')
file.write('bin_LL\tbin_UL\tbin_mid\tbin_number\tbin_number_conc\tbin_number_conc_norm' + '\n')
for row in binned_data[3]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
##script
#set up data structure
flight_data = {
'core_dia':[],
'min_coated_dia':[],
'max_coated_dia':[],
'STP_correction_factor':[],
'sample_flow':[],
}
UHSAS_dict = {}
master_bin_list = getUHSASBins(master_bin_file)
for point in master_bin_list:
master_bin_ll = master_bin_list[point][0]
master_bin_ul = master_bin_list[point][1]
master_bin_mid = master_bin_ll + (master_bin_ul-master_bin_ll)/2
UHSAS_dict[master_bin_mid] = []
UHSAS_to_fit_list = []
for flight in flight_times:
print flight
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UHSAS_file = flight_times[flight][2]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
###SP2
cursor.execute(('''SELECT bc.rBC_mass_fg, bc.coat_thickness_nm_jancalib, bc.LF_scat_amp, hk.sample_flow, ftd.temperature_C, ftd.BP_Pa
FROM polar6_coating_2015 bc
JOIN polar6_fssp_cloud_data fssp on bc.fssp_id = fssp.id
JOIN polar6_flight_track_details ftd on bc.flight_track_data_id = ftd.id
JOIN polar6_hk_data_2015 hk on bc.hk_data_id = hk.id
WHERE bc.rBC_mass_fg IS NOT NULL and bc.UNIX_UTC_ts >= %s and bc.UNIX_UTC_ts < %s and bc.particle_type = %s and fssp.FSSPTotalConc <=%s and hk.sample_flow >%s and hk.sample_flow <%s AND bc.rBC_mass_fg >= %s AND bc.rBC_mass_fg <= %s ORDER BY bc.UNIX_UTC_ts'''),
(UNIX_start_time,UNIX_end_time,'incand',cloud_droplet_conc,100,200,min_rBC_mass,max_rBC_mass))
coat_data = cursor.fetchall()
#assemble the data for this bin
flight_data = assemble_bin_data(coat_data,flight_data)
##UHSAS
if UHSAS_file == '':
continue
UHSAS_bin_list = getUHSASBins(UHSAS_file)
#get number concs for each bin
for point in UHSAS_bin_list:
bin_ll = UHSAS_bin_list[point][0]
bin_ul = UHSAS_bin_list[point][1]
bin_mid = bin_ll + (bin_ul-bin_ll)/2
#get UHSAS #avg for that bin on that flight (values taken every sec)
cursor.execute(('''SELECT avg(property_value)
FROM polar6_uhsas_rbc_binned_data_altcalib bd
WHERE bd.UNIX_UTC_ts >= %s
AND bd.UNIX_UTC_ts < %s
AND (bd.bin_LL + (bd.bin_UL-bd.bin_LL)/2) >= %s
AND (bd.bin_LL + (bd.bin_UL-bd.bin_LL)/2) < %s
AND bd.binned_property = %s
'''),
(UNIX_start_time, UNIX_end_time,bin_ll,bin_ul,'UHSAS_#'))
UHSAS_number = cursor.fetchall()
UHSAS_number_conc = UHSAS_number[0][0]
if UHSAS_number_conc != None:
UHSAS_number_conc_norm = UHSAS_number_conc*1.0/(math.log((bin_ul))-math.log(bin_ll))
for point in master_bin_list:
master_bin_ll = master_bin_list[point][0]
master_bin_ul = master_bin_list[point][1]
master_bin_mid = master_bin_ll + (master_bin_ul-master_bin_ll)/2
if master_bin_ll <= bin_mid < master_bin_ul:
UHSAS_dict[master_bin_mid].append(UHSAS_number_conc_norm)
if bin_ll >= min_BC_VED:
UHSAS_to_fit_list.append([master_bin_ll,master_bin_ul,master_bin_mid,np.nan,UHSAS_number_conc,UHSAS_number_conc_norm])
UHSAS_list = []
for bin in UHSAS_dict:
mean_conc = np.mean(UHSAS_dict[bin])
UHSAS_list.append([bin,mean_conc])
UHSAS_list.sort()
#UHSAS fit
UHSAS_to_fit_list.sort()
UHSAS_fitted_list = []
uhsas_fit_func = fit_distr(UHSAS_to_fit_list,'UHSAS')
for point in UHSAS_bin_list:
bin_ll = UHSAS_bin_list[point][0]
bin_ul = UHSAS_bin_list[point][1]
bin_mid = bin_ll + (bin_ul-bin_ll)/2
fit_uhsas_number_conc_norm = lognorm(bin_mid, uhsas_fit_func[0], uhsas_fit_func[1], uhsas_fit_func[2])
fit_uhsas_number_conc = fit_uhsas_number_conc_norm*(math.log((bin_ul))-math.log(bin_ll))
UHSAS_fitted_list.append([bin_ll,bin_ul,bin_mid,np.nan,fit_uhsas_number_conc,fit_uhsas_number_conc_norm])
UHSAS_fitted_list.sort()
print 'binning'
binned_data = calc_bin_values(flight_data,UNIX_start_time,UNIX_end_time,UHSAS_file)
plot_distrs(binned_data,UHSAS_list,UHSAS_fitted_list)
write_files(binned_data)
cnx.close() | mit |
riscmaster/risc_maap | risc_visual/src/Ishmaalsplots.py | 2 | 2444 | #!/usr/bin/env python
'''======================================================
Created by: Ishmaal Erekson
Last updated: January 2015
File name: Ishmaalsplots.py
Organization: RISC Lab, Utah State University
======================================================'''
import roslib; roslib.load_manifest('ardrone_tutorials')
roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import sys
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import pylab as p
import matplotlib.pyplot as plt
import IshyPlots as pl
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import * # states,controls,trajectory
from ardrone_autonomy.msg import * # on board state estimates
#========================#
# Globals #
#========================#
rate = 60 # Hz
start_time = 0
states = Cortex()
states.Obj = [States()]*1
t = 0
#===================================#
# Plotting Variables #
#===================================#
Init_mat = np.asmatrix(np.zeros((1,3)))
#==================#
# Get States #
#==================#
def GetStates(I):
global states
states = I
#=============#
# Plots #
#=============#
def Plots():
global states,Init_mat
x_act = states.Obj[0].x
y_act = states.Obj[0].y
z_act = states.Obj[0].z
new_stack = np.asmatrix(np.array([x_act, y_act, z_act]))
Init_mat = np.append(Init_mat,new_stack,0)
def Plotaroosky():
global Init_mat
pl.pl3d('Plotalicious',Init_mat[0:,0],Init_mat[0:,1],Init_mat[0:,2])
plt.show()
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('Ishmaalsplots')
start_time = rospy.get_time()
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_states = rospy.Subscriber('/cortex_raw' , Cortex, GetStates)
Plots()
t = rospy.get_time() - start_time
rospy.loginfo("time = %f",t)
if t > 20:
Plotaroosky()
start_time = rospy.get_time()
r.sleep()
| bsd-2-clause |
rcpedersen/stl-to-voxel | testslice.py | 1 | 4181 | import slice
import stl_reader
import unittest
from matplotlib import pyplot
import pylab
from mpl_toolkits.mplot3d import Axes3D
import random
import numpy as np
from util import printBigArray
class TestSlice(unittest.TestCase):
def testslice(self):
mesh = list(stl_reader.read_stl_verticies("./stls/cube.stl"))
# for p in slice.slice(mesh, .5):
# print(list(p))
def testIsAboveAndBelow(self):
tri = (
(1, 2, 5),
(2, 3, 4),
(3, 2, 1)
)
self.assertTrue(slice.isAboveAndBelow(tri, 4))
self.assertTrue(slice.isAboveAndBelow(tri, 5))
self.assertTrue(slice.isAboveAndBelow(tri, 1))
self.assertFalse(slice.isAboveAndBelow(tri, 5.5))
self.assertFalse(slice.isAboveAndBelow(tri, 0))
def testIsAboveAndBelow_inclusive(self):
tri = [
[1, 2, 5],
[2, 3, 5],
[3, 2, 1]
]
self.assertTrue(slice.isAboveAndBelow(tri, 5))
def test_wherelinecrossesz(self):
p1 = [2, 4, 1]
p2 = [1, 2, 5]
self.assertEqual(slice.whereLineCrossesZ(p1, p2, 1), p1)
self.assertEqual(slice.whereLineCrossesZ(p1, p2, 5), p2)
self.assertEqual(slice.whereLineCrossesZ(p1, p2, 3), [1.5, 3, 3])
self.assertEqual(slice.whereLineCrossesZ([0, 0, 0], [0, 1, 1], 0.5), [0.0, 0.5, 0.5])
def test_linearInterpolation(self):
p1 = [2, 4, 1]
p2 = [1, 2, 5]
self.assertEqual(slice.linearInterpolation(p1, p2, 0), p1)
self.assertEqual(slice.linearInterpolation(p1, p2, 1), p2)
self.assertEqual(slice.linearInterpolation(p1, p2, .5), [1.5, 3, 3])
def test_makebigarray(self):
chr = ' '
arr = [[chr, chr], [chr, chr]]
self.assertEqual(slice.makeBigArrayOfZeros(2), arr)
self.assertEqual(len(slice.makeBigArrayOfZeros(3)), 3)
def test_toVoxels(self):
# mesh = list(stl_reader.read_stl_verticies("./stls/sphere.stl"))
# lines = slice.slice(mesh, 1)
# slice.toVoxels(lines)
pass
def test_triangleToIntersectingLines(self):
tri = [
[2, 4, 1],
[1, 2, 5],
[3, 2, 3]
]
lines = list(slice.triangleToIntersectingLines(tri, 4))
self.assertIn((tri[0], tri[1]), lines)
self.assertIn((tri[2], tri[1]), lines)
print(lines)
self.assertEqual(2, len(lines))
def test_triangleToIntersectingLines_onePointSame(self):
tri = [
[2, 4, 1],
[1, 2, 5],
[3, 2, 3]
]
lines = list(slice.triangleToIntersectingLines(tri, 3))
self.assertTrue((tri[0], tri[2]) in lines or (tri[2], tri[1]) in lines)
def test_triangleToIntersectingLines_twoPointSame(self):
tri = [
[2, 4, 3],
[1, 2, 5],
[3, 2, 3]
]
lines = list(slice.triangleToIntersectingLines(tri, 3))
self.assertTrue((tri[0], tri[1]) in lines or (tri[2], tri[1]) in lines)
def test_triangleToIntersectingLines_threePointSame(self):
tri = [
[2, 4, 3],
[1, 2, 3],
[3, 2, 3]
]
lines = list(slice.triangleToIntersectingLines(tri, 3))
self.assertTrue(tri in lines)
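    # Note: this second definition reuses the name test_toVoxels, so it shadows the
    # stub of the same name defined above and is the only version that gets run.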
def test_toVoxels(self):
lines = [
[[3, 0, 7], [0, 3, 7]],
[[3, 7, 7], [0, 3, 7]],
[[3, 7, 7], [7, 3, 7]],
[[3, 0, 7], [7, 3, 7]],
]
expected = [[False, False, False, True, True, False, False, False],
[False, False, True, True, True, True, False, False],
[False, True, True, True, True, True, True, False],
[True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True],
[False, True, True, True, True, True, True, False],
[False, False, True, True, True, True, False, False],
[False, False, False, True, True, False, False, False]]
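        # The four segments above trace a diamond (rotated square) in the z = 7 plane;
        # `expected` sketches that outline rasterised and filled on an 8x8 grid.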
printBigArray(slice.toVoxels(lines, 8, 8))
# self.assertTrue((expected==)
| mit |
h2educ/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
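    # S holds negative squared distances, so ten times the median similarity is a more
    # negative preference, which discourages creating many exemplars (clusters).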
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Tension/CoupledSoftContact/n_0.3/Plot_Results.py | 15 | 3554 | #!/usr/bin/env python
#!/usr/bin/python
import h5py
from matplotlib import pylab
import matplotlib.pylab as plt
import sys
from matplotlib.font_manager import FontProperties
import math
import numpy as np
import matplotlib as mpl
plt.rcParams.update({'font.size': 30})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=28
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=28
# Plot the figure. Add labels and titles.
plt.figure()
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Stress [Pa] ")
# Pore Pressure
# #########################################################################
thefile = "Soil_Foundation_System_Surface_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
upU_p = finput["/Model/Nodes/Generalized_Displacements"][3,:]
upU_u = finput["/Model/Nodes/Generalized_Displacements"][2,:]
upU_U = finput["/Model/Nodes/Generalized_Displacements"][6,:]
u_u = finput["/Model/Nodes/Generalized_Displacements"][79,:]
sigma_zz_ = finput["/Model/Elements/Gauss_Outputs"][14,:]
# pore_pressure
ax.plot(times,upU_p,'b',linewidth=2,label=r'Pore Pressure $p$');
ax.hold(True);
# Total Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
T = times[len(times)-1]
sigma_zz = -400/T*times
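# The total vertical stress is taken as a linear ramp from 0 down to -400 Pa over the
# record, presumably mirroring how the surface load is applied in the simulation.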
# plot the total stress
ax.plot(times,sigma_zz,'k',linewidth=2,label=r'Total Stress $\sigma$');
ax.hold(True);
# Effective Stress
# #########################################################################
# Read the time and displacement
times = finput["time"][:];
sigma_zz_ = sigma_zz - upU_p
# plot the effective stress (total stress minus pore pressure)
ax.plot(times,sigma_zz_,'r',linewidth=2,label=r'''Effective Stress $\sigma^{\prime}$''');
ax.hold(True);
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.35),
ncol=2, fancybox=True, shadow=True, prop={'size': 24})
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Effective_Stress_Principle.pdf", bbox_inches='tight')
# plt.show()
#
################################### Drainage Condition Verification #############################
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
ax.plot(times,upU_u*1e8,'k',linewidth=3,label=r'$upU\_u$'); ax.hold(True);
ax.plot(times,upU_U*1e8,'b',linewidth=10,label=r'$upU\_U$'); ax.hold(True);
ax.plot(times,u_u*1e8,'r',linewidth=3,label=r'$u\_u$'); ax.hold(True);
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Displacement $\times 1e^{-8}$ [m] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.25),
ncol=4, fancybox=True, shadow=True, prop={'size': 24})
max_yticks = 5
yloc = plt.MaxNLocator(max_yticks)
ax.yaxis.set_major_locator(yloc)
max_xticks = 5
yloc = plt.MaxNLocator(max_xticks)
ax.xaxis.set_major_locator(yloc)
pylab.savefig("Coupled_Soft_Contact_Steady_State_SF_Ststem_Under_Compression_Porosity_Undrained_Conditions.pdf", bbox_inches='tight')
# plt.show()
| cc0-1.0 |
rvraghav93/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 81 | 2525 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
edgecolor='black', s=20)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
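# Each one-vs-rest boundary satisfies coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0,
# so plot_hyperplane below solves x1 = -(coef[c, 0]*x0 + intercept[c]) / coef[c, 1] at
# both ends of the visible x-range and draws the resulting dashed line.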
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/util/test_assert_index_equal.py | 1 | 6392 | import numpy as np
import pytest
from pandas import Categorical, Index, MultiIndex, NaT
import pandas._testing as tm
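# Note: check_exact, rtol and check_categorical used below are pytest fixtures,
# presumably supplied by the package's test configuration (e.g. a local conftest.py);
# check_order, by contrast, is parametrized explicitly in this module.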
def test_index_equal_levels_mismatch():
msg = """Index are different
Index levels are different
\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(\\[\\('A', 1\\),
\\('A', 2\\),
\\('B', 3\\),
\\('B', 4\\)\\],
\\)"""
idx1 = Index([1, 2, 3])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, exact=False)
def test_index_equal_values_mismatch(check_exact):
msg = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_length_mismatch(check_exact):
msg = """Index are different
Index length are different
\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 3, 4])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_class_mismatch(check_exact):
msg = """Index are different
Index classes are different
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 3.0])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, exact=True, check_exact=check_exact)
def test_index_equal_values_close(check_exact):
idx1 = Index([1, 2, 3.0])
idx2 = Index([1, 2, 3.0000000001])
if check_exact:
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
else:
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_values_less_close(check_exact, rtol):
idx1 = Index([1, 2, 3.0])
idx2 = Index([1, 2, 3.0001])
kwargs = {"check_exact": check_exact, "rtol": rtol}
if check_exact or rtol < 0.5e-3:
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
else:
tm.assert_index_equal(idx1, idx2, **kwargs)
def test_index_equal_values_too_far(check_exact, rtol):
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 4])
kwargs = {"check_exact": check_exact, "rtol": rtol}
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
@pytest.mark.parametrize("check_order", [True, False])
def test_index_equal_value_order_mismatch(check_exact, rtol, check_order):
idx1 = Index([1, 2, 3])
idx2 = Index([3, 2, 1])
msg = """Index are different
Index values are different \\(66\\.66667 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[3, 2, 1\\], dtype='int64'\\)"""
if check_order:
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(
idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=True
)
else:
tm.assert_index_equal(
idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=False
)
def test_index_equal_level_values_mismatch(check_exact, rtol):
idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
kwargs = {"check_exact": check_exact, "rtol": rtol}
msg = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
@pytest.mark.parametrize(
"name1,name2",
[(None, "x"), ("x", "x"), (np.nan, np.nan), (NaT, NaT), (np.nan, NaT)],
)
def test_index_equal_names(name1, name2):
idx1 = Index([1, 2, 3], name=name1)
idx2 = Index([1, 2, 3], name=name2)
if name1 == name2 or name1 is name2:
tm.assert_index_equal(idx1, idx2)
else:
name1 = "'x'" if name1 == "x" else name1
name2 = "'x'" if name2 == "x" else name2
msg = f"""Index are different
Attribute "names" are different
\\[left\\]: \\[{name1}\\]
\\[right\\]: \\[{name2}\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2)
def test_index_equal_category_mismatch(check_categorical):
msg = """Index are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\)
\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \
ordered=False\\)"""
idx1 = Index(Categorical(["a", "b"]))
idx2 = Index(Categorical(["a", "b"], categories=["a", "b", "c"]))
if check_categorical:
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
else:
tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
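# GraphLassoCV selects the l1 penalty alpha by internal cross-validation, so cov_ and
# prec_ above are the covariance and sparse precision estimates at the chosen alpha;
# the Ledoit-Wolf shrinkage estimate below is computed for comparison.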
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
immerrr/numpy | numpy/lib/polynomial.py | 1 | 37669 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K) : present only if `full` = False and `cov`=True
        The covariance matrix of the polynomial coefficient estimates. The diagonal
        of this matrix contains the variance estimates for each coefficient. If y is
        a 2-D array, then the covariance matrix for the `k`-th data set is in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0] :
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0] :
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full :
return c, resids, rank, s, rcond
elif cov :
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
        either input is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
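    # Synthetic (long) division: each step divides the leading coefficient of the
    # running remainder by the divisor's leading coefficient to get the next quotient
    # coefficient, then subtracts that multiple of v from the remainder.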
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
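# Helper for poly1d.__str__: finds the '**<power>' substrings produced while formatting
# the polynomial and rewrites the string as two-line output with the exponents raised
# onto the line above, wrapping once a line grows longer than `wrap` characters.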
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/utils/estimator_checks.py | 41 | 47834 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classfiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
Running this function will execute an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
# Estimator is a class, so use its own name (``__class__.__name__`` would
# return the metaclass name instead).
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
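def _example_check_estimator():  # pragma: no cover
    # Illustrative usage sketch, not part of the original module: run the
    # common checks against an existing scikit-learn estimator class.  The
    # helper name here is hypothetical and only serves as documentation.
    from sklearn.neighbors import KNeighborsClassifier
    check_estimator(KNeighborsClassifier)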
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
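# Added note (not in the original file): because NotAnArray implements
# __array__, np.asarray(NotAnArray(np.ones(3))) transparently yields the
# wrapped ndarray.  The *_data_not_an_array checks below rely on this to
# verify that estimators accept generic array-likes, not only numpy arrays.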
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
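# Illustrative sketch, not part of the original file: scikit-learn's
# NotFittedError derives from both ValueError and AttributeError, so a single
# exception type satisfies the (AttributeError, ValueError) assertions above.
# The class below is a hypothetical stand-in, not the real implementation.
class _ExampleNotFittedError(ValueError, AttributeError):
    """Raised when a predict-like method is called before fit."""
    pass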
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
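# Added note (not in the original file): this reproduces the 'balanced'
# heuristic, w_c = n_samples / (n_classes * count(c)), so the manually
# weighted fit below should match the class_weight='balanced' fit above.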
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/sandbox/distributions/otherdist.py | 33 | 10145 | '''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
http://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and continuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
from __future__ import print_function
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
'''mixtures with a discrete distribution
The mixing distribution is a discrete distribution like scipy.stats.poisson.
All distribution in the mixture of the same type and parameterized
by the outcome of the mixing distribution and have to be a continuous
distribution (or have a pdf method).
As an example, a mixture of normal distributed random variables with
Poisson as the mixing distribution.
assumes vectorized shape, loc and scale as in scipy.stats.distributions
assume mixing_dist is frozen
initialization looks fragile for all possible cases of lower and upper
bounds of the distributions.
'''
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parameterized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
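# Added note (not in the original file): over the truncated support of the
# mixing distribution the mixture density is
#     pdf(x) = sum_k pmf_mixing(k) * base_dist.pdf(x, *bd_args(k), **bd_kwds(k))
# which is what the pdf/cdf methods below evaluate via a weighted sum.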
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
lower = mixing_dist.ppf(1e-4)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
upper = mixing_dist.isf(1e-4)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support)
def rvs(self, size=1):
mrvs = self.mixing_dist.rvs(size)
# TODO: check strange cases? this assumes continuous integers
mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)
bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
kwds = {'size':size}
kwds.update(bd_kwds)
# use the per-draw args selected above; self.bd_args would ignore mrvs_idx
rvs = self.base_dist.rvs(*bd_args, **kwds)
return rvs, mrvs_idx
def pdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
def cdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
#try:
class ClippedContinuous(object):
'''clipped continuous distribution with a masspoint at clip_lower
Notes
-----
first version, to try out possible designs
insufficient checks for valid arguments and not clear
whether it works for distributions that have compact support
clip_lower is fixed and independent of the distribution parameters.
The clip_lower point in the pdf has to be interpreted as a mass point,
i.e. different treatment in integration and expect function, which means
none of the generic methods for this can be used.
maybe this will be better designed as a mixture between a degenerate or
discrete and a continuous distribution
Warning: uses equality to check for clip_lower values in function
arguments; since these are floating point numbers, the comparison might
fail if the clip_lower values are not exactly equal.
We could add a check whether the values are in a small neighborhood, but
it would be expensive (need to search and check all values).
'''
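# Added note (not in the original file): the implied decomposition is
#     P(X == clip_lower) = F_base(clip_lower)        (mass point)
#     pdf(x) = f_base(x)  and  cdf(x) = F_base(x)    for x > clip_lower
#     cdf(x) = 0,  sf(x) = 1                         for x < clip_lower
# which matches the special-casing of clip_lower in pdf/cdf/sf below.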
def __init__(self, base_dist, clip_lower):
self.base_dist = base_dist
self.clip_lower = clip_lower
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds
def rvs(self, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
rvs_ = self.base_dist.rvs(*args, **kwds)
#same as numpy.clip ?
rvs_[rvs_ < clip_lower] = clip_lower
return rvs_
def pdf(self, x, *args, **kwds):
x = np.atleast_1d(x)
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
clip_mask = (x == self.clip_lower)
if np.any(clip_mask):
clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
pdf_raw[x < clip_lower] = 0
return pdf_raw
def cdf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
cdf_raw = self.base_dist.cdf(x, *args, **kwds)
#not needed if equality test is used
## clip_mask = (x == self.clip_lower)
## if np.any(clip_mask):
## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
## pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
#if self.a is defined
cdf_raw[x < clip_lower] = 0
return cdf_raw
def sf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
sf_raw = self.base_dist.sf(x, *args, **kwds)
sf_raw[x <= clip_lower] = 1
return sf_raw
def ppf(self, x, *args, **kwds):
raise NotImplementedError
def plot(self, x, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
mass = self.pdf(clip_lower, *args, **kwds)
xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
import matplotlib.pyplot as plt
#x = np.linspace(-4, 4, 21)
#plt.figure()
plt.xlim(clip_lower-0.1, x.max())
#remove duplicate calculation
xpdf = self.pdf(x, *args, **kwds)
plt.ylim(0, max(mass, xpdf.max())*1.1)
plt.plot(xr, self.pdf(xr, *args, **kwds))
#plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
plt.stem([clip_lower], [mass],
linefmt='b-', markerfmt='bo', basefmt='r-')
return
if __name__ == '__main__':
doplots = 1
#*********** Poisson-Normal Mixture
mdist = stats.poisson(2.)
bdist = stats.norm
bd_args_fn = lambda x: ()
#bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
print(pd.pdf(1))
p, bp = pd.pdf(np.linspace(0,20,21))
pc, bpc = pd.cdf(np.linspace(0,20,21))
print(pd.rvs())
rvs, m = pd.rvs(size=1000)
if doplots:
import matplotlib.pyplot as plt
plt.hist(rvs, bins = 100)
plt.title('poisson mixture of normal distributions')
#********** clipped normal distribution (Tobit)
bdist = stats.norm
clip_lower_ = 0. #-0.5
cnorm = ClippedContinuous(bdist, clip_lower_)
x = np.linspace(1e-8, 4, 11)
print(cnorm.pdf(x))
print(cnorm.cdf(x))
if doplots:
#plt.figure()
#cnorm.plot(x)
plt.figure()
cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
plt.title('clipped normal distribution')
fig = plt.figure()
for i, loc in enumerate([0., 0.5, 1.,2.]):
fig.add_subplot(2,2,i+1)
cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
plt.title('clipped normal, loc = %3.2f' % loc)
loc = 1.5
rvs = cnorm.rvs(loc=loc, size=2000)
plt.figure()
plt.hist(rvs, bins=50)
plt.title('clipped normal rvs, loc = %3.2f' % loc)
#plt.show()
| bsd-3-clause |
devanshdalal/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 43 | 10272 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that changing n_components with set_params after fitting raises an
# error on partial_fit, and that restoring the original value works again.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of features between fit and partial_fit raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
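# Added note (not in the original file): each score column was normalized to
# unit norm and then rescaled, so X_hat = X_pca . components_ has singular
# values equal to those scale factors (3.142, 2.718 and the untouched 1.0),
# which is what the two assertions below verify after refitting on X_hat.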
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
ysekky/GPy | GPy/plotting/plotly_dep/plot_definitions.py | 4 | 16743 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
import plotly
from plotly import tools
from plotly.graph_objs import Scatter, Scatter3d, Line,\
Marker, ErrorX, ErrorY, Bar, Heatmap, Trace,\
Annotations, Annotation, Contour, Font, Surface
from plotly.exceptions import PlotlyDictKeyError
SYMBOL_MAP = {
'o': 'dot',
'v': 'triangle-down',
'^': 'triangle-up',
'<': 'triangle-left',
'>': 'triangle-right',
's': 'square',
'+': 'cross',
'x': 'x',
'*': 'x', # no star yet in plotly!!
'D': 'diamond',
'd': 'diamond',
}
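# Added note (not in the original file): SYMBOL_MAP translates the matplotlib
# marker shortcuts used throughout GPy into plotly symbol names; markers with
# no entry are passed through unchanged (see the fallback in scatter() below).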
class PlotlyPlotsBase(AbstractPlottingLibrary):
def __init__(self):
super(PlotlyPlotsBase, self).__init__()
self._defaults = defaults.__dict__
self.current_states = dict()
def figure(self, rows=1, cols=1, specs=None, is_3d=False, **kwargs):
if specs is None:
specs = [[{'is_3d': is_3d}]*cols]*rows
figure = tools.make_subplots(rows, cols, specs=specs, **kwargs)
return figure
def new_canvas(self, figure=None, row=1, col=1, projection='2d',
xlabel=None, ylabel=None, zlabel=None,
title=None, xlim=None,
ylim=None, zlim=None, **kwargs):
#if 'filename' not in kwargs:
# print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
# filename = None
#else:
# filename = kwargs.pop('filename')
if figure is None:
figure = self.figure(is_3d=projection=='3d')
figure.layout.font = Font(family="Raleway, sans-serif")
if projection == '3d':
figure.layout.legend.x=.5
figure.layout.legend.bgcolor='#DCDCDC'
return (figure, row, col), kwargs
def add_to_canvas(self, canvas, traces, legend=False, **kwargs):
figure, row, col = canvas
def append_annotation(a, xref, yref):
if 'xref' not in a:
a['xref'] = xref
if 'yref' not in a:
a['yref'] = yref
figure.layout.annotations.append(a)
def append_trace(t, row, col):
figure.append_trace(t, row, col)
def recursive_append(traces):
if isinstance(traces, Annotations):
xref, yref = figure._grid_ref[row-1][col-1]
for a in traces:
append_annotation(a, xref, yref)
# elif isinstance(traces, (Trace)): # doesn't work
# elif type(traces) in [v for k,v in go.__dict__.iteritems()]:
elif isinstance(traces, (Scatter, Scatter3d, ErrorX,
ErrorY, Bar, Heatmap, Trace, Contour, Surface)):
try:
append_trace(traces, row, col)
except PlotlyDictKeyError:
                    # It's a dictionary of plots:
for t in traces:
recursive_append(traces[t])
elif isinstance(traces, (dict)):
for t in traces:
recursive_append(traces[t])
elif isinstance(traces, (tuple, list)):
for t in traces:
recursive_append(t)
recursive_append(traces)
figure.layout['showlegend'] = legend
return canvas
    def show_canvas(self, canvas, filename=None, **kwargs):
        # concrete online/offline subclasses implement the actual rendering
        raise NotImplementedError
def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], cmap=None, label=None, marker='o', marker_kwargs=None, **kwargs):
        try:
            marker = SYMBOL_MAP[marker]
        except KeyError:
            # not a matplotlib marker; pass the symbol through to plotly unchanged
            pass
marker_kwargs = marker_kwargs or {}
if 'symbol' not in marker_kwargs:
marker_kwargs['symbol'] = marker
X, Y = np.squeeze(X), np.squeeze(Y)
if Z is not None:
Z = np.squeeze(Z)
return Scatter3d(x=X, y=Y, z=Z, mode='markers',
showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
return Scatter(x=X, y=Y, mode='markers', showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
def plot(self, ax, X, Y, Z=None, color=None, label=None, line_kwargs=None, **kwargs):
if 'mode' not in kwargs:
kwargs['mode'] = 'lines'
X, Y = np.squeeze(X), np.squeeze(Y)
if Z is not None:
Z = np.squeeze(Z)
return Scatter3d(x=X, y=Y, z=Z, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
return Scatter(x=X, y=Y, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
def plot_axis_lines(self, ax, X, color=Tango.colorsHex['mediumBlue'], label=None, marker_kwargs=None, **kwargs):
if X.shape[1] == 1:
annotations = Annotations()
for i, row in enumerate(X):
annotations.append(
Annotation(
text='',
x=row[0], y=0,
yref='paper',
ax=0, ay=20,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor=color,
showarrow=True,
#showlegend=i==0,
#label=label,
))
return annotations
        elif X.shape[1] == 2:
            # guard against the default None before injecting the marker symbol
            marker_kwargs = marker_kwargs or {}
            marker_kwargs.setdefault('symbol', 'diamond')
opacity = kwargs.pop('opacity', .8)
return Scatter3d(x=X[:, 0], y=X[:, 1], z=np.zeros(X.shape[0]),
mode='markers',
projection=dict(z=dict(show=True, opacity=opacity)),
marker=Marker(color=color, **marker_kwargs or {}),
opacity=0,
name=label,
showlegend=label is not None, **kwargs)
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
figure, _, _ = canvas
if 'barmode' in kwargs:
figure.layout['barmode'] = kwargs.pop('barmode')
return Bar(x=x, y=height, marker=Marker(color=color), name=label)
def xerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
error_kwargs = error_kwargs or {}
if (error.shape[0] == 2) and (error.ndim == 2):
error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
else:
error_kwargs.update(dict(array=error, symmetric=True))
X, Y = np.squeeze(X), np.squeeze(Y)
if Z is not None:
Z = np.squeeze(Z)
return Scatter3d(x=X, y=Y, z=Z, mode='markers',
error_x=ErrorX(color=color, **error_kwargs or {}),
marker=Marker(size='0'), name=label,
showlegend=label is not None, **kwargs)
return Scatter(x=X, y=Y, mode='markers',
error_x=ErrorX(color=color, **error_kwargs or {}),
marker=Marker(size='0'), name=label,
showlegend=label is not None,
**kwargs)
def yerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
error_kwargs = error_kwargs or {}
if (error.shape[0] == 2) and (error.ndim == 2):
error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
else:
error_kwargs.update(dict(array=error, symmetric=True))
X, Y = np.squeeze(X), np.squeeze(Y)
if Z is not None:
Z = np.squeeze(Z)
return Scatter3d(x=X, y=Y, z=Z, mode='markers',
error_y=ErrorY(color=color, **error_kwargs or {}),
marker=Marker(size='0'), name=label,
showlegend=label is not None, **kwargs)
return Scatter(x=X, y=Y, mode='markers',
error_y=ErrorY(color=color, **error_kwargs or {}),
marker=Marker(size='0'), name=label,
showlegend=label is not None,
**kwargs)
def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
        if 'showscale' not in imshow_kwargs:
imshow_kwargs['showscale'] = False
return Heatmap(z=X, name=label,
x0=extent[0], dx=float(extent[1]-extent[0])/(X.shape[0]-1),
y0=extent[2], dy=float(extent[3]-extent[2])/(X.shape[1]-1),
zmin=vmin, zmax=vmax,
showlegend=label is not None,
hoverinfo='z',
**imshow_kwargs)
def imshow_interact(self, ax, plot_function, extent=None, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
# TODO stream interaction?
super(PlotlyPlotsBase, self).imshow_interact(ax, plot_function)
    def annotation_heatmap(self, ax, X, annotation, extent=None, label='Gradient', imshow_kwargs=None, **annotation_kwargs):
        # imshow_kwargs defaults to None, so normalise it before use
        imshow_kwargs = imshow_kwargs or {}
        imshow_kwargs.setdefault('label', label)
        imshow_kwargs.setdefault('showscale', True)
        imshow = self.imshow(ax, X, extent, **imshow_kwargs)
        # rescale X into [-1, 1] so the annotation text colour can be picked by contrast
        X = X - X.min()
        X /= X.max() / 2.
        X -= 1
x = np.linspace(extent[0], extent[1], X.shape[0])
y = np.linspace(extent[2], extent[3], X.shape[1])
annotations = Annotations()
for n, row in enumerate(annotation):
for m, val in enumerate(row):
var = X[n][m]
annotations.append(
Annotation(
text=str(val),
x=x[m], y=y[n],
xref='x1', yref='y1',
font=dict(color='white' if np.abs(var) > 0.8 else 'black', size=10),
opacity=.5,
showarrow=False,
))
return imshow, annotations
def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
super(PlotlyPlotsBase, self).annotation_heatmap_interact(ax, plot_function, extent)
def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
return Contour(x=X, y=Y, z=C,
#ncontours=levels, contours=Contours(start=C.min(), end=C.max(), size=(C.max()-C.min())/levels),
name=label, **kwargs)
def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
return Surface(x=X, y=Y, z=Z, name=label, showlegend=label is not None, **kwargs)
def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, line_kwargs=None, **kwargs):
        if 'line' not in kwargs:
kwargs['line'] = Line(**line_kwargs or {})
else:
kwargs['line'].update(line_kwargs or {})
if color.startswith('#'):
fcolor = 'rgba({c[0]}, {c[1]}, {c[2]}, {alpha})'.format(c=Tango.hex2rgb(color), alpha=kwargs.get('opacity', 1.0))
else: fcolor = color
u = Scatter(x=X, y=upper, fillcolor=fcolor, showlegend=label is not None, name=label, fill='tonextx', legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
#fcolor = '{}, {alpha})'.format(','.join(fcolor.split(',')[:-1]), alpha=0.0)
l = Scatter(x=X, y=lower, fillcolor=fcolor, showlegend=False, name=label, legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
return l, u
def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
if color.startswith('#'):
colarray = Tango.hex2rgb(color)
opacity = .9
        else:
            # parse an 'rgb(r, g, b)' or 'rgba(r, g, b, a)' colour string into numbers
            colarray = [float(c) for c in color.strip(')').split('(')[1].split(',')]
            opacity = .9
            if len(colarray) == 4:
                colarray, opacity = colarray[:3], colarray[3]
        # fade the bands towards the outer percentiles; alpha peaks at the median band
        alpha = opacity*(1.-np.abs(np.linspace(-1,1,len(percentiles)-1)))
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
polycol = []
for i, y1, a in zip(range(len(percentiles)), percentiles, alpha):
fcolor = 'rgba({}, {}, {}, {alpha})'.format(*colarray, alpha=a)
            if i == len(percentiles)//2:
polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=True,
name=label, line=Line(width=0, smoothing=0), mode='none', fill='tonextx',
legendgroup='density', hoverinfo='none', **kwargs))
else:
polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=False,
name=None, line=Line(width=1, smoothing=0, color=fcolor), mode='none', fill='tonextx',
legendgroup='density', hoverinfo='none', **kwargs))
return polycol
class PlotlyPlotsOnline(PlotlyPlotsBase):
def __init__(self):
super(PlotlyPlotsOnline, self).__init__()
def show_canvas(self, canvas, filename=None, **kwargs):
figure, _, _ = canvas
if len(figure.data) == 0:
# add mock data
figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
from ..gpy_plot.plot_util import in_ipynb
if in_ipynb():
return plotly.plotly.iplot(figure, filename=filename, **kwargs)
else:
return plotly.plotly.plot(figure, filename=filename, **kwargs)#self.current_states[hex(id(figure))]['filename'])
class PlotlyPlotsOffline(PlotlyPlotsBase):
def __init__(self):
super(PlotlyPlotsOffline, self).__init__()
def show_canvas(self, canvas, filename=None, **kwargs):
figure, _, _ = canvas
if len(figure.data) == 0:
# add mock data
figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
from ..gpy_plot.plot_util import in_ipynb
if in_ipynb():
plotly.offline.init_notebook_mode(connected=True)
return plotly.offline.iplot(figure, filename=filename, **kwargs)#self.current_states[hex(id(figure))]['filename'])
else:
return plotly.offline.plot(figure, filename=filename, **kwargs)
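# --- Illustrative usage sketch (assumption: not part of the original module) ---
# The classes above follow the AbstractPlottingLibrary canvas protocol:
# new_canvas() builds a (figure, row, col) triple, the primitive methods
# (plot, scatter, fill_between, ...) return plotly traces, add_to_canvas()
# appends them to the appropriate subplot, and show_canvas() renders the
# figure.  The guarded demo below walks through that flow with the offline
# backend; the data, filename and variable names are invented, and it assumes
# the legacy plotly graph_objs API that this module is written against.
if __name__ == '__main__':
    lib = PlotlyPlotsOffline()
    canvas, _ = lib.new_canvas(title='demo')

    x_demo = np.linspace(0, 2 * np.pi, 50)
    traces = [
        lib.plot(canvas, x_demo, np.sin(x_demo), label='sin(x)'),
        lib.scatter(canvas, x_demo[::5], np.sin(x_demo[::5]), label='samples'),
    ]

    lib.add_to_canvas(canvas, traces, legend=True)
    lib.show_canvas(canvas, filename='demo_plot.html')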
| bsd-3-clause |
B3AU/waveTree | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # NOTE: the early return below disables signature output entirely, so
        # the conditional that follows it is unreachable.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
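# --- Illustrative usage sketch (assumption: not part of the original module) ---
# get_doc_object() picks the appropriate wrapper (class, function or plain
# object) and returns an object whose str() is reST ready for Sphinx to render.
# The guarded demo below formats the numpydoc-style docstring of a locally
# defined function; the function itself is invented for illustration, and the
# snippet assumes the legacy Python environment this extension targets.
if __name__ == '__main__':
    def example_add(x, y=0):
        """Add two numbers.

        Parameters
        ----------
        x : int
            First operand.
        y : int, optional
            Second operand.

        Returns
        -------
        int
            The sum of ``x`` and ``y``.
        """
        return x + y

    doc = get_doc_object(example_add)
    print(str(doc))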
| bsd-3-clause |