repo_name (string, length 6-112) | path (string, length 4-204) | copies (string, length 1-3) | size (string, length 4-6) | content (string, length 714-810k) | license (string, 15 classes) |
---|---|---|---|---|---|
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/backends/backend_macosx.py | 8 | 16426 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
class Show(ShowBase):
def mainloop(self):
_macosx.show()
show = Show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.gc.set_dpi(self.dpi)
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_path(path, transform, linewidth, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_markers(marker_path, marker_trans, path, trans, linewidth, rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
if offset_position=='data':
offset_position = True
else:
offset_position = False
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
master_transform = master_transform.get_matrix()
offsetTrans = offsetTrans.get_matrix()
gc.draw_path_collection(master_transform, path_ids, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds,
offset_position)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
gc.draw_quad_mesh(master_transform.get_matrix(),
meshWidth,
meshHeight,
coordinates,
offsets,
offsetTrans.get_matrix(),
facecolors,
antialiased,
edgecolors)
def new_gc(self):
self.gc.save()
self.gc.set_hatch(None)
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def draw_gouraud_triangle(self, gc, points, colors, transform):
points = transform.transform(points)
gc.draw_gouraud_triangle(points, colors)
def get_image_magnification(self):
return self.gc.get_image_magnification()
def draw_image(self, gc, x, y, im):
nrows, ncols, data = im.as_rgba_str()
gc.draw_image(x, y, nrows, ncols, data)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# todo, handle props, angle, origins
scale = self.gc.get_image_magnification()
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi*scale)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
scale = self.gc.get_image_magnification()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi*scale, prop)
descent /= scale
xd = descent * numpy.sin(numpy.deg2rad(angle))
yd = descent * numpy.cos(numpy.deg2rad(angle))
x = numpy.round(x + ox + xd)
y = numpy.round(y + oy - yd)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
weight = prop.get_weight()
# transform weight into string for the native backend
if weight >= 700:
weight = 'bold'
else:
weight = 'normal'
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
gc.draw_text(x, y, six.text_type(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# todo: handle props
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
weight = prop.get_weight()
# transform weight into string for the native backend
if weight >= 700:
weight = 'bold'
else:
weight = 'normal'
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
width, height, descent = self.gc.get_text_width_height_descent(
six.text_type(s), family, size, weight, style)
return width, height, descent
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
_macosx.GraphicsContext.set_alpha(self, _alpha, self.get_forced_alpha())
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_foreground(self, fg, isRGBA=False):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_graylevel(self, fg):
GraphicsContextBase.set_graylevel(self, fg)
_macosx.GraphicsContext.set_graylevel(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['gif'] = 'Graphics Interchange Format'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def draw_idle(self, *args, **kwargs):
self.invalidate()
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
FigureCanvasBase.resize_event(self)
def _print_bitmap(self, filename, *args, **kwargs):
# In backend_bases.py, print_figure changes the dpi of the figure.
# But since we are essentially redrawing the picture, we need the
# original dpi. Pick it up from the renderer.
dpi = kwargs['dpi']
old_dpi = self.figure.dpi
self.figure.dpi = self.renderer.dpi
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
filename = six.text_type(filename)
self.write_bitmap(filename, width, height, dpi)
self.figure.dpi = old_dpi
def print_bmp(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_gif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerMac(*args, **kwargs)
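# Minimal usage sketch for the timer API documented above; the names 'canvas'
# and 'update' are assumed placeholders, add_callback/start come from TimerBase:
#
#     timer = canvas.new_timer(interval=500)   # fire every 500 ms
#     timer.add_callback(update)
#     timer.start()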
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
if matplotlib.is_interactive():
self.show()
self.canvas.draw_idle()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
def dynamic_update(self):
self.canvas.draw_idle()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
| mit |
biogeochemistry/PorousMediaLab | porousmedialab/plotter.py | 1 | 9693 | import math
import numpy as np
import matplotlib.pyplot as plt
from porousmedialab.phcalc import Acid
import seaborn as sns
from matplotlib.colors import ListedColormap
sns.set_style("whitegrid")
def custom_plot(lab, x, y, ttl='', y_lbl='', x_lbl=''):
plt.figure()
ax = plt.subplot(111)
plt.plot(x, y, lw=3)
plt.title(ttl)
plt.xlim(x[0], x[-1])
plt.ylabel(y_lbl)
plt.xlabel(x_lbl)
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_batch_rates(batch, *args, **kwargs):
for rate in sorted(batch.estimated_rates):
plt.figure()
plot_batch_rate(batch, rate, *args, **kwargs)
def plot_batch_rate(batch, rate, time_factor=1):
plt.plot(batch.time * time_factor,
batch.estimated_rates[rate][0] / time_factor, label=rate, lw=3)
plt.ylabel('Rate, $[\Delta C/\Delta T]$')
plt.xlabel('Time, [T]')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def plot_batch_deltas(batch, *args, **kwargs):
for element in sorted(batch.species):
plt.figure()
plot_batch_delta(batch, element, *args, **kwargs)
def plot_batch_delta(batch, element, time_factor=1):
plt.plot(batch.time[1:] * time_factor, batch.species[element]
['rates'][0] / time_factor, label=element, lw=3)
plt.ylabel('Rate of change, $[\Delta C/ \Delta T]$')
plt.xlabel('Time, [T]')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def saturation_index_countour(lab, elem1, elem2, Ks, labels=False):
plt.figure()
plt.title('Saturation index %s%s' % (elem1, elem2))
resolution = 100
n = math.ceil(lab.time.size / resolution)
plt.xlabel('Time')
z = np.log10((lab.species[elem1]['concentration'][:, ::n] + 1e-8) * (
lab.species[elem2]['concentration'][:, ::n] + 1e-8) / lab.constants[Ks])
lim = np.max(abs(z))
lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[::n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
"RdBu_r", 101)), origin='lower', levels=lim, extend='both')
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('Saturation index %s%s' % (elem1, elem2))
return ax
def plot_fractions(lab):
for component in lab.acid_base_components:
if isinstance(component['pH_object'], Acid):
plt.figure()
for idx in range(len(component['species'])):
plt.plot(lab.time, lab.species[component['species'][idx]]
['alpha'][0, :], label=component['species'][idx])
plt.ylabel('Fraction')
plt.xlabel('Time')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def all_plot_depth_index(lab, *args, **kwargs):
for element in sorted(lab.species):
plt.figure()
plot_depth_index(lab, element, *args, **kwargs, ax=None)
def plot_depth_index(lab, element, idx=0, time_to_plot=False, time_factor=1, ax=None):
if ax is None:
ax = plt.subplot(111)
if element == 'Temperature':
ax.set_title('Temperature')
ax.set_ylabel('Temperature, C')
elif element == 'pH':
ax.set_title('pH')
ax.set_ylabel('pH')
else:
ax.set_ylabel('Concentration')
if time_to_plot:
num_of_elem = int(time_to_plot / lab.dt)
else:
num_of_elem = len(lab.time)
t = lab.time[-num_of_elem:] * time_factor
ax.set_xlabel('Time')
if isinstance(element, str):
ax.plot(t, lab.species[element]['concentration']
[idx][-num_of_elem:], lw=3)
ax.set_title(element + ' concentration')
elif isinstance(element, (list, tuple)):
for e in element:
ax.plot(t, lab.species[e]['concentration']
[idx][-num_of_elem:], lw=3, label=e)
ax.legend(frameon=1)
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_depths(lab, element, depths=[0, 1, 2, 3, 4], time_to_plot=False):
plt.figure()
ax = plt.subplot(111)
if element == 'Temperature':
plt.title('Temperature at specific depths')
plt.ylabel('Temperature, C')
else:
plt.title(element + ' concentration at specific depths')
plt.ylabel('Concentration')
if time_to_plot:
num_of_elem = int(time_to_plot / lab.dt)
else:
num_of_elem = len(lab.time)
t = lab.time[-num_of_elem:]
plt.xlabel('Time')
for depth in depths:
lbl = str(depth)
plt.plot(t, lab.species[element]['concentration'][int(
depth / lab.dx)][-num_of_elem:], lw=3, label=lbl)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_times(lab, element, time_slices=[0, 1, 2, 3, 4]):
plt.figure()
ax = plt.subplot(111)
if element == 'Temperature':
plt.title('Temperature profile')
plt.xlabel('Temperature, C')
else:
plt.title(element + ' concentration')
plt.xlabel('Concentration')
plt.ylabel('Depth, cm')
for tms in time_slices:
lbl = 'at time: %.2f ' % (tms)
plt.plot(lab.species[element]['concentration'][
:, int(tms / lab.dt)], -lab.x, lw=3, label=lbl)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2)
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_profiles(lab):
for element in sorted(lab.species):
plot_profile(lab, element)
def plot_profile(lab, element):
plt.figure()
plt.plot(lab.profiles[element], -lab.x,
sns.xkcd_rgb["denim blue"], lw=3, label=element)
if element == 'Temperature':
plt.title('Temperature profile')
plt.xlabel('Temperature, C')
elif element == 'pH':
plt.title('pH profile')
plt.xlabel('pH')
else:
plt.title('%s concentration' % (element, ))
plt.xlabel('Concentration')
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
ax.grid(linestyle='-', linewidth=0.2)
plt.legend()
plt.tight_layout()
return ax
def plot_contourplots(lab, **kwargs):
for element in sorted(lab.species):
contour_plot(lab, element, **kwargs)
def contour_plot(lab, element, labels=False, days=False, last_year=False):
plt.figure()
plt.title(element + ' concentration')
resolution = 100
n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
if days:
X, Y = np.meshgrid(lab.time[k::n] * 365, -lab.x)
plt.xlabel('Time')
else:
X, Y = np.meshgrid(lab.time[k::n], -lab.x)
plt.xlabel('Time')
z = lab.species[element]['concentration'][:, k - 1:-1:n]
CS = plt.contourf(X, Y, z, 51, cmap=ListedColormap(
sns.color_palette("Blues", 51)), origin='lower')
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('%s [M/V]' % element)
if element == 'Temperature':
plt.title('Temperature contour plot')
cbar.ax.set_ylabel('Temperature, C')
if element == 'pH':
plt.title('pH contour plot')
cbar.ax.set_ylabel('pH')
return ax
def plot_contourplots_of_rates(lab, **kwargs):
rate = sorted(lab.estimated_rates)
for r in rate:
contour_plot_of_rates(lab, r, **kwargs)
def contour_plot_of_rates(lab, r, labels=False, last_year=False):
plt.figure()
plt.title('{}'.format(r))
resolution = 100
n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
z = lab.estimated_rates[r][:, k - 1:-1:n]
# lim = np.max(np.abs(z))
# lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[k::n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(
sns.color_palette("Blues", 51)))
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('Rate %s [M/V/T]' % r)
return ax
def plot_contourplots_of_deltas(lab, **kwargs):
elements = sorted(lab.species)
if 'Temperature' in elements:
elements.remove('Temperature')
for element in elements:
contour_plot_of_delta(lab, element, **kwargs)
def contour_plot_of_delta(lab, element, labels=False, last_year=False):
plt.figure()
plt.title('Rate of %s consumption/production' % element)
resolution = 100
n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
z = lab.species[element]['rates'][:, k - 1:-1:n]
lim = np.max(np.abs(z))
lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[k:-1:n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
"RdBu_r", 101)), origin='lower', levels=lim, extend='both')
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('Rate of %s change $[\Delta/T]$' % element)
return ax
| mit |
joekasp/spectro | analysis.py | 1 | 4007 | import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.decomposition import FactorAnalysis
"""Do PCA, ICA, or factor analysis and return components or coefficients."""
def do_analysis(data, normalize=False, n_comp=10, analysis_type='pca'):
"""
Do component analysis on the input data and return a fit object.
:param data: numpy array, set of images to be analyzed
set of Z images, each X x Y
:param normalize: boolean, True to normalize data before doing analysis
:param n_comp: int, number of components to generate
:param analysis_type: string for the type of analysis to perform
'pca' for Principal Component Analysis (default)
'ica' for Independent Component Analysis
'fa' for Factor Analysis
:return: PCA, ICA, or factor analysis object
"""
data = np.nan_to_num(data)
if normalize:
for idx in range(data.shape[2]):
img = data[:, :, idx]
peak = np.abs(img).max()
img = img / peak
data[:, :, idx] = img
data_r = reshape_image(data)
if analysis_type == 'pca':
fit_object = PCA(n_components=n_comp)
elif analysis_type == 'ica':
fit_object = FastICA(n_components=n_comp, whiten=True)
elif analysis_type == 'fa':
fit_object = FactorAnalysis(n_components=n_comp)
fit_object.fit(data_r)
return fit_object
def get_components(data, normalize=False, n_comp=10, analysis_type='pca'):
"""
Do component analysis on the input data and return set of component images.
:param data: numpy array, set of images to be analyzed
set of Z images, each X x Y
:param normalize: boolean, True to normalize data before doing analysis
:param n_comp: int, number of components to generate
:param analysis_type: string for the type of analysis to perform
'pca' for Principal Component Analysis (default)
'ica' for Independent Component Analysis
'fa' for Factor Analysis
:return: numpy array with dimensions (X, Y, n_comp)
set of n_comp images, each X x Y
"""
data = np.nan_to_num(data)
fit_object = do_analysis(data, normalize, n_comp, analysis_type)
comp = np.zeros((data.shape[0], data.shape[1], fit_object.components_.shape[0]))
for i in range(fit_object.components_.shape[0]):
comp[:, :, i] = fit_object.components_[i].reshape(data.shape[0], data.shape[1])
return comp
def get_projections(data, normalize=False, n_comp=10, analysis_type='pca'):
"""
Do PCA on the input data and return projection of original data onto components.
:param data: numpy array, set of images to be analyzed
set of Z images, each X x Y
:param normalize: boolean, True to normalize data before doing analysis
:param n_comp: int, number of components to generate
:param analysis_type: string for the type of analysis to perform
'pca' for Principal Component Analysis (default)
'ica' for Independent Component Analysis
'fa' for Factor Analysis
:return: numpy array with dimensions (Z, n_comp)
corresponding to the contribution of each component to each original image
"""
data = np.nan_to_num(data)
fit_object = do_analysis(data, normalize, n_comp, analysis_type)
data_r = reshape_image(data)
return fit_object.transform(data_r)
def reshape_image(data):
"""
Reshape each 2D image in the stack into 1D
example: 109 x 109 x 13 original matrix (stack of 13 109x109 images)
becomes 13 x 11881 (since 11881 = 109 x 109)
"""
data_r = np.zeros((data.shape[2], data.shape[0] * data.shape[1]))
for i in range(data.shape[2]):
data_r[i] = data[:,:,i].ravel()
return data_r
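# Minimal usage sketch for the helpers above; the array shape and the import
# name 'analysis' are assumptions (a stack of 13 images, each 109 x 109,
# matching the reshape_image docstring):
#
#     import numpy as np
#     import analysis
#     stack = np.random.rand(109, 109, 13)
#     comps = analysis.get_components(stack, n_comp=5, analysis_type='pca')   # (109, 109, 5)
#     projs = analysis.get_projections(stack, n_comp=5, analysis_type='pca')  # (13, 5)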
| mit |
khkaminska/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
sss1/DeepInteractions | pairwise/read_FIMO_results.py | 2 | 5408 | import numpy as np
import csv
from sklearn.metrics import average_precision_score
from keras.optimizers import Adam # needed to compile prediction model
import h5py
import load_data_pairs as ld # my own scripts for loading data
import build_small_model as bm
import util
fimo_root = '/home/sss1/Desktop/projects/DeepInteractions/pairwise/FIMO/'
data_path = '/home/sss1/Desktop/projects/DeepInteractions/data/uniform_len/original/all_data.h5'
cell_lines = ['GM12878', 'HeLa-S3', 'HUVEC', 'IMR90', 'K562', 'NHEK']
data_types = ['enhancers', 'promoters']
# num_repeats = 5 # number of i.i.d. trials to run; too slow to do :(
random_window_length = 20 # number of bp to randomize at each feature occurence
# Randomize each appearance of the pattern pattern in the data
def randomize_window(sequence):
for base_idx in range(np.shape(sequence)[0]):
sequence[base_idx] = np.zeros(4)
sequence[base_idx, np.random.randint(0, 4)] = 1
# Returns a deep copy of the data, with motif occurrences randomized out.
# A deep copy is made because this is much faster than reloading the data for
# every motif.
# data: (num_sequences X sequence_length X 4) 3-tensor of num_sequences
# one-hot encoded nucleotide sequences of equal-length sequence_length
# motif_idxs: list of (sample_idx, start_idx, stop_idx) triples
def replace_motifs_in_data(data, motif_idxs):
data_copy = np.copy(data)
for (sample_idx, motif_start, motif_stop) in motif_idxs:
mid = (motif_start + motif_stop)/2
start = max(0, mid - (random_window_length/2))
stop = min(np.shape(data)[1], start + random_window_length)
randomize_window(data_copy[sample_idx, start:stop, :])
return data_copy
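# Sketch of the expected call, following the comment block above
# (the array shape and index triples are assumed, purely for illustration):
#
#     X = np.zeros((100, 3000, 4))                 # one-hot sequences
#     idxs = [(0, 10, 25), (3, 100, 118)]          # (sample_idx, start, stop) triples
#     X_scrambled = replace_motifs_in_data(X, idxs)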
for cell_line in cell_lines:
for data_type in data_types:
fimo_path = fimo_root + cell_line + '_' + data_type + '_all_retest/fimo.txt'
# data_path = data_root + cell_line + '/' + cell_line + '_ep_split.h5'
matches = dict() # dict mapping motif_names to lists of (sample_idx, start_idx, stop_idx) triples
print 'Reading and processing FIMO output...'
with open(fimo_path, 'rb') as csv_file:
reader = csv.reader(csv_file, delimiter='\t')
row_idx = -1
for row in reader:
row_idx += 1
if row_idx == 0: # skip header row
continue
motif_name = row[0]
if not motif_name in matches: # if this is the first match of that motif
matches[motif_name] = []
sample_idx = int(row[1])
motif_start = int(row[2])
motif_stop = int(row[3])
matches[motif_name].append((sample_idx, motif_start, motif_stop))
print 'Identified ' + str(len(matches)) + ' distinct motifs.'
print 'Loading original data...'
# X_enhancers_original, X_promoters_original, y = ld.load_hdf5_ep_split(data_path)
with h5py.File(data_path, 'r') as hf:
X_enhancers_original = np.array(hf.get(cell_line + '_X_enhancers')).transpose((0, 2, 1))
X_promoters_original = np.array(hf.get(cell_line + '_X_promoters')).transpose((0, 2, 1))
y = np.array(hf.get(cell_line + 'labels'))
print 'np.shape(X_enhancers_original): ' + str(np.shape(X_enhancers_original))
print 'np.shape(X_promoters_original): ' + str(np.shape(X_promoters_original))
print 'np.shape(y): ' + str(np.shape(y))
model = bm.build_model(use_JASPAR = False)
print 'Compiling model...'
opt = Adam(lr = 1e-5)
model.compile(loss = 'binary_crossentropy',
optimizer = opt,
metrics = ["accuracy"])
print 'Loading ' + cell_line + ' ' + data_type + ' model weights...'
model.load_weights('/home/sss1/Desktop/projects/DeepInteractions/weights/' + cell_line + '-basic.hdf5')
out_root = '/home/sss1/Desktop/projects/DeepInteractions/feature_importances/SPEID/from_HOCOMOCO_motifs/'
out_path = out_root + cell_line + '_' + data_type + '_feature_importance.csv'
print 'Running predictions on original data'
y_score = model.predict([X_enhancers_original, X_promoters_original], batch_size = 100, verbose = 1)
true_AUPR = average_precision_score(y, y_score)
print 'True AUPR is ' + str(true_AUPR)
true_MS = y_score.mean()
print 'True MS is ' + str(true_MS)
with open(out_path, 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter = ',')
writer.writerow(['Motif Name', 'Motif Count', 'AUPR Difference', 'MS Difference'])
for motif, idxs in matches.iteritems():
print 'Randomizing ' + str(len(idxs)) + ' occurrences of motif ' + motif + ' in ' + cell_line + ' ' + data_type + '...'
if data_type == 'enhancers':
X_enhancers = replace_motifs_in_data(X_enhancers_original, idxs)
X_promoters = X_promoters_original
elif data_type == 'promoters':
X_enhancers = X_enhancers_original
X_promoters = replace_motifs_in_data(X_promoters_original, idxs)
else:
raise ValueError
print 'Running predictions on motif ' + motif + '...'
y_score = model.predict([X_enhancers, X_promoters], batch_size = 200, verbose = 1)
AUPR = average_precision_score(y, y_score)
print 'AUPR after removing motif ' + motif + ' was ' + str(AUPR) + '\n'
MS = y_score.mean()
print 'MS after removing motif ' + motif + ' was ' + str(MS) + '\n'
writer.writerow([motif, str(len(idxs)), str(true_AUPR - AUPR), str(true_MS - MS)])
| gpl-3.0 |
gclenaghan/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
echohenry2006/tvb-library | contrib/from_articles/region_deterministic_bnm_g2d_d.py | 5 | 5288 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figure 11, of Sanz-Leon P., Knock, S. A., Spiegler, A. and Jirsa V.
Mathematical framework for large-scale brain network modelling in The Virtual Brain.
Neuroimage, 2014, (in review)
Needs:
A working installation of tvb
Run:
python region_deterministic_bnm_g2d_d.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_g2d_d.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 10, 7 # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
pylab.rcParams.update({'axes.linewidth': 3})
parser = argparse.ArgumentParser(description='Reproduce results of Figure 2 presented in Fitzhugh 1961')
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
idx = ['d0', 'd1', 'd2']
a = [0.7, 0.7, 0.0]
beta = [0.8, 0.8, 0.0]
I = [0.0, -0.4, 0.0]
gcs = [0.0, 0.0, 0.0]
#Initial values
ic_v = [-2.0, -2.0, -2.0]
ic_w = [-3.0, -3.0, -3.0]
simulation_length = 512
speed = 4.0
if args['sim']:
for i in range(3):
oscillator = models.Generic2dOscillator(b=-1.0, c=0., e=0.0, f=1/3., d=0.075, g=1.0, alpha=1.0, tau=3., gamma = 1.0,
a=a[i],
beta=beta[i],
I=I[i])
white_matter = connectivity.Connectivity(load_default=True)
white_matter.speed = numpy.array([speed])
# 0, 0.0042, 0.042
white_matter_coupling = coupling.Linear(a=gcs[i])
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=2**-4)
#Initialise some Monitors with period in physical time
momo = monitors.Raw()
mama = monitors.TemporalAverage(period=2**-2)
#Bundle them
what_to_watch = (momo, mama)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = oscillator, connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint, monitors = what_to_watch)
sim.configure()
# Set initial conditions
sim.history[:, 0, :, : ] = ic_v[i]
sim.history[:, 1, :, : ] = ic_w[i]
LOG.info("Starting simulation...")
#Perform the simulation
raw_data = []
raw_time = []
tavg_data = []
tavg_time = []
for raw, tavg in sim(simulation_length=simulation_length):
if not raw is None:
raw_time.append(raw[0])
raw_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
#Make the lists numpy.arrays for easier use.
RAW = numpy.asarray(raw_data)
TAVG = numpy.asarray(tavg_data)
# <codecell>
numpy.save('region_deterministic_bnm_g2d_raw_' + idx[i] + '.npy', RAW)
numpy.save('region_deterministic_bnm_g2d_tavg_' + idx[i] + '.npy', TAVG)
numpy.save('region_deterministic_bnm_g2d_rawtime_' + idx[i] + '.npy', raw_time)
numpy.save('region_deterministic_bnm_g2d_tavgtime_' + idx[i] + '.npy', tavg_time)
if args['fig']:
for i in range(3):
RAW = numpy.load('region_deterministic_bnm_g2d_raw_' + idx[i] + '.npy')
raw_time = numpy.load('region_deterministic_bnm_g2d_rawtime_' + idx[i] + '.npy')
fig=figure(1)
clf()
ax1 = subplot(1, 2, 1)
plot(raw_time, RAW[:, 0, :, 0],'k', alpha=0.013)
plot(raw_time, RAW[:, 1, :, 0],'r', alpha=0.013)
plot(raw_time, RAW[:, 0, :, 0].mean(axis=1), 'k', linewidth=3)
plot(raw_time, RAW[:, 1, :, 0].mean(axis=1), 'r', linewidth=3)
xlabel('time[ms]')
ylabel('[au]')
ylim([-3.5, 3.5])
xlim([0, simulation_length])
xticks((0, simulation_length /2. , simulation_length),
('0', str(int(simulation_length //2)), str(simulation_length)))
yticks((-3, 0,3), ('-3', '0', '3'))
for label in ax1.get_yticklabels():
label.set_fontsize(24)
for label in ax1.get_xticklabels():
label.set_fontsize(24)
ax = subplot(1, 2, 2)
plot(RAW[:, 0, :, 0], RAW[:, 1, :, 0], 'b', alpha=0.013, linewidth=3)
plot(RAW[:, 0, :, 0].mean(axis=1), RAW[:, 1, :, 0].mean(axis=1), 'b', alpha=1.)
plot(RAW[0, 0, :, 0], RAW[0, 1, :, 0], 'bo', alpha=0.15, linewidth=3)
ylim([-3.5, 3.5])
xlim([-3.5, 3.5])
xticks((-3, 0, 3), ('-3', '0', '3'))
yticks((-3, 0, 3), ('-3', '0', '3'))
ax.yaxis.set_label_position("right")
for label in ax.get_xticklabels():
label.set_fontsize(24)
for label in ax.get_yticklabels():
label.set_fontsize(24)
xlabel(r'$V$')
ylabel(r'$W$')
fig_name = 'G2D_default_speed_' + str(int(speed)) + '-config_gcs-' + idx[i] + '.pdf'
savefig(fig_name)
#EoF | gpl-2.0 |
dagophil/vigra | vigranumpy/lib/__init__.py | 2 | 99542 | #######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# [email protected] or
# [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
from __future__ import division, print_function
import sys, os, time, math
from numbers import Number
from multiprocessing import cpu_count
try:
import pylab
except Exception as e:
pass
_vigra_path = os.path.abspath(os.path.dirname(__file__))
_vigra_doc_path = _vigra_path + '/doc/vigranumpy/index.html'
if sys.platform.startswith('win'):
# On Windows, add subdirectory 'dlls' to the PATH in order to find
# the DLLs vigranumpy depends upon. Since this directory appears
# at the end of PATH, already installed DLLs are always preferred.
_vigra_dll_path = _vigra_path + '/dlls'
if os.path.exists(_vigra_dll_path):
os.putenv('PATH', os.getenv('PATH') + os.pathsep + _vigra_dll_path)
def _fallbackModule(moduleName, message):
'''This function installs a fallback module with the given 'moduleName'.
All function calls into this module raise an ImportError with the
given 'message' that hopefully tells the user why the real module
was not available.
'''
import sys
moduleClass = vigranumpycore.__class__
class FallbackModule(moduleClass):
def __init__(self, name):
moduleClass.__init__(self, name)
self.__name__ = name
def __getattr__(self, name):
if name.startswith('__'):
return moduleClass.__getattribute__(self, name)
try:
return moduleClass.__getattribute__(self, name)
except AttributeError:
raise ImportError("""%s.%s: %s""" % (self.__name__, name, self.__doc__))
module = FallbackModule(moduleName)
sys.modules[moduleName] = module
module.__doc__ = """Import of module '%s' failed.\n%s""" % (moduleName, message)
if not os.path.exists(_vigra_doc_path):
_vigra_doc_path = "http://hci.iwr.uni-heidelberg.de/vigra/doc/vigranumpy/index.html"
__doc__ = '''VIGRA Computer Vision Library
HTML documentation is available in
%s
Help on individual functions can be obtained via their doc strings
as usual.
The following sub-modules group related functionality:
* arraytypes (VigraArray and axistags, automatically imported into 'vigra')
* ufunc (improved array arithmetic, automatically used by VigraArray)
* impex (image and array I/O)
* colors (color space transformations)
* filters (spatial filtering, e.g. smoothing)
* sampling (image and array re-sampling and interpolation)
* fourier (Fourier transform and Fourier domain filters)
* analysis (image analysis and segmentation)
* learning (machine learning and classification)
* noise (noise estimation and normalization)
* geometry (geometric algorithms, e.g. convex hull)
* histogram (histograms and channel representation)
* graphs (grid graphs / graphs / graph algorithms)
* utilities (priority queues)
''' % _vigra_doc_path
from .__version__ import version
import vigra.vigranumpycore as vigranumpycore
import vigra.arraytypes as arraytypes
import vigra.impex as impex
import vigra.sampling as sampling
import vigra.filters as filters
import vigra.analysis as analysis
import vigra.learning as learning
import vigra.colors as colors
import vigra.noise as noise
import vigra.geometry as geometry
import vigra.optimization as optimization
import vigra.histogram as histogram
import vigra.graphs as graphs
import vigra.utilities as utilities
import vigra.blockwise as blockwise
sampling.ImagePyramid = arraytypes.ImagePyramid
try:
import vigra.fourier as fourier
except Exception as e:
_fallbackModule('fourier',
'''
%s
Make sure that the fftw3 libraries are found during compilation and import.
They may be downloaded at http://www.fftw.org/.''' % str(e))
import fourier
class Timer:
def __init__(self, name, verbose=True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.verbose:
print(self.name, "...")
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
if self.verbose :
print("... took ", self.interval, "sec")
# portable way to inject a metaclass (taken from six.py)
def _with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
# import most frequently used functions
from vigra.arraytypes import *
standardArrayType = arraytypes.VigraArray
defaultAxistags = arraytypes.VigraArray.defaultAxistags
from vigra.vigranumpycore import ChunkedArrayFull, ChunkedArrayLazy, ChunkedArrayCompressed, ChunkedArrayTmpFile, Compression
try:
from vigra.vigranumpycore import ChunkedArrayHDF5, HDF5Mode
except:
pass
from vigra.impex import readImage, readVolume
def readHDF5(filenameOrGroup, pathInFile, order=None):
'''Read an array from an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name
of the dataset to be read, including intermediate groups. If the
first argument is a group object, the path is relative to this
group, otherwise it is relative to the file's root group.
If the dataset has an attribute 'axistags', the returned array
will have type :class:`~vigra.VigraArray` and will be transposed
into the given 'order' ('vigra.VigraArray.defaultOrder'
will be used if no order is given). Otherwise, the returned
array is a plain 'numpy.ndarray'. In this case, order='F' will
return the array transposed into Fortran order.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup, 'r')
group = file['/']
try:
dataset = group[pathInFile]
if not isinstance(dataset, h5py.highlevel.Dataset):
raise IOError("readHDF5(): '%s' is not a dataset" % pathInFile)
data = dataset.value
axistags = dataset.attrs.get('axistags', None)
if axistags is not None:
data = data.view(arraytypes.VigraArray)
data.axistags = arraytypes.AxisTags.fromJSON(axistags)
if order is None:
order = arraytypes.VigraArray.defaultOrder
data = data.transposeToOrder(order)
else:
if order == 'F':
data = data.transpose()
elif order not in [None, 'C', 'A']:
raise IOError("readHDF5(): unsupported order '%s'" % order)
finally:
if file is not None:
file.close()
return data
def writeHDF5(data, filenameOrGroup, pathInFile, compression=None, chunks=None):
'''Write an array to an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name of the
dataset to be written, including intermediate groups. If the first
argument is a group object, the path is relative to this group,
otherwise it is relative to the file's root group. If the dataset already
exists, it will be replaced without warning.
If 'data' has an attribute 'axistags', the array is transposed to
numpy order before writing. Moreover, the axistags will be
stored along with the data in an attribute 'axistags'.
'compression' can be set to 'gzip', 'szip' or 'lzf'
gzip (standard compression),
szip (available if HDF5 is compiled with szip. Faster compression, limited types),
lzf (very fast compression, all types).
The 'lzf' compression filter is many times faster than 'gzip'
at the cost of a lower compresion ratio.
Chunking is disabled by default. When 'chunks' is set to True
chunking is enabled and a chunk shape is determined automatically.
Alternatively a chunk shape can be specified explicitly by passing
a tuple of the desired shape.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup)
group = file['/']
try:
levels = pathInFile.split('/')
for groupname in levels[:-1]:
if groupname == '':
continue
g = group.get(groupname, default=None)
if g is None:
group = group.create_group(groupname)
elif not isinstance(g, h5py.highlevel.Group):
raise IOError("writeHDF5(): invalid path '%s'" % pathInFile)
else:
group = g
dataset = group.get(levels[-1], default=None)
if dataset is not None:
if isinstance(dataset, h5py.highlevel.Dataset):
del group[levels[-1]]
else:
raise IOError("writeHDF5(): cannot replace '%s' because it is not a dataset" % pathInFile)
try:
data = data.transposeToNumpyOrder()
except:
pass
dataset = group.create_dataset(levels[-1], data=data, compression=compression, chunks=chunks)
if hasattr(data, 'axistags'):
dataset.attrs['axistags'] = data.axistags.toJSON()
finally:
if file is not None:
file.close()
impex.readHDF5 = readHDF5
readHDF5.__module__ = 'vigra.impex'
impex.writeHDF5 = writeHDF5
writeHDF5.__module__ = 'vigra.impex'
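# Minimal round-trip sketch for readHDF5()/writeHDF5() defined above
# (illustrative only; defined but never called). It assumes the 'h5py' module
# is installed; the file name and dataset path are placeholders.
def _exampleHDF5RoundTrip(array, filename='example.h5'):
    # write with gzip compression, then read the dataset back
    writeHDF5(array, filename, 'data/array', compression='gzip')
    restored = readHDF5(filename, 'data/array')
    return restored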
from .filters import convolve, gaussianSmoothing
from .sampling import resize
def gaussianDerivative(array, sigma, orders, out=None, window_size=0.0):
'''
Convolve 'array' with a Gaussian derivative kernel of the given 'orders'.
'orders' must contain a list of integers >= 0 for each non-channel axis.
Each channel of the array is treated independently. If 'sigma' is a single
value, the same scale is used in each dimension. If 'sigma' is a tuple
or list of values of appropriate length, a different scale is used for each axis.
'window_size' specifies the ratio between the filter scale and the size of
the filter window. Use values around 2.0 to speed up the computation at the
price of increased cut-off error, and values >= 4.0 for very accurate results.
The window size is automatically determined for the default value 0.0.
For the first and second derivatives, you can also use :func:`gaussianGradient`
and :func:`hessianOfGaussian`.
'''
if hasattr(array, 'dropChannelAxis'):
if array.dropChannelAxis().ndim != len(orders):
raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
else:
if array.ndim != len(orders):
raise RuntimeError("gaussianDerivative(): len(orders) doesn't match array dimension.")
try:
len(sigma)
except:
sigma = [sigma]*len(orders)
kernels = tuple([filters.gaussianDerivativeKernel(s, o, window_size=window_size) \
for s, o in zip(sigma, orders)])
return filters.convolve(array, kernels, out)
filters.gaussianDerivative = gaussianDerivative
gaussianDerivative.__module__ = 'vigra.filters'
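# Minimal usage sketch for gaussianDerivative() above (illustrative only;
# defined but never called); 'image' is assumed to be a scalar 2D image.
def _exampleGaussianDerivative(image, sigma=2.0):
    dx = gaussianDerivative(image, sigma, [1, 0])   # first derivative along the first axis
    dy = gaussianDerivative(image, sigma, [0, 1])   # first derivative along the second axis
    return dx, dy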
# import enums
CLOCKWISE = sampling.RotationDirection.CLOCKWISE
COUNTER_CLOCKWISE = sampling.RotationDirection.COUNTER_CLOCKWISE
UPSIDE_DOWN = sampling.RotationDirection.UPSIDE_DOWN
CompleteGrow = analysis.SRGType.CompleteGrow
KeepContours = analysis.SRGType.KeepContours
StopAtThreshold = analysis.SRGType.StopAtThreshold
_selfdict = globals()
def searchfor(searchstring):
'''Scan all vigra modules to find classes and functions containing
'searchstring' in their name.
'''
for attr in _selfdict.keys():
contents = dir(_selfdict[attr])
for cont in contents:
if ( cont.upper().find(searchstring.upper()) ) >= 0:
print(attr+"."+cont)
# FIXME: use axistags here
def imshow(image,show=True, **kwargs):
'''Display a scalar or RGB image by means of matplotlib.
If the image does not have one or three channels, an exception is raised.
The image will be automatically scaled to the range 0...255 when its dtype
is not already 'uint8' and neither 'cmap' nor 'norm' are specified in kwargs.
'''
import matplotlib.pylab
if not hasattr(image, 'axistags'):
plot = matplotlib.pyplot.imshow(image, **kwargs)
if show:
matplotlib.pylab.show()
return plot
image = image.transposeToNumpyOrder()
if image.channels == 1:
image = image.dropChannelAxis().view(numpy.ndarray)
if 'cmap' in kwargs.keys():
cmap = kwargs.pop('cmap')
else:
cmap = matplotlib.cm.gray
if 'norm' in kwargs.keys():
norm = kwargs.pop('norm')
else:
norm = matplotlib.cm.colors.Normalize()
plot = matplotlib.pyplot.imshow(image, cmap=cmap, norm=norm, **kwargs)
if show:
matplotlib.pylab.show()
return plot
elif image.channels == 3:
if image.dtype != numpy.uint8:
out = image.__class__(image.shape, dtype=numpy.uint8, axistags=image.axistags)
image = colors.linearRangeMapping(image, newRange=(0.0, 255.0), out=out)
plot = matplotlib.pyplot.imshow(image.view(numpy.ndarray), **kwargs)
if show:
matplotlib.pylab.show()
return plot
else:
raise RuntimeError("vigra.imshow(): Image must have 1 or 3 channels.")
def multiImshow(images,shape, show=True):
import pylab
nImg = len(images)
f = pylab.figure()
s = tuple(shape)
for c,iname in enumerate(images.keys()):
data,itype = images[iname]
if itype == 'img':
ax1 = f.add_subplot(s[0],s[1],c+1)
imshow(data,show=False)
ax1.set_title(iname)
pylab.axis('off')
if show :
pylab.show()
def segShow(img,labels,edgeColor=(0,0,0),alpha=0.3,show=False,returnImg=False,r=0):
img = numpy.squeeze(img)
if img.ndim ==2:
img = numpy.concatenate( [ img[:,:,None]]*3 ,axis=2).astype(numpy.float32)
img = taggedView(img, 'xyc')
labels = numpy.squeeze(labels)
crackedEdges = analysis.regionImageToCrackEdgeImage(labels+1).squeeze()
#print("cracked shape",crackedEdges.shape)
whereEdge = numpy.where(crackedEdges==0)
whereNoEdge = numpy.where(crackedEdges!=0)
crackedEdges[whereEdge] = 1
crackedEdges[whereNoEdge] = 0
if r>0 :
res = filters.discDilation(crackedEdges.astype(numpy.uint8),int(r) )
whereEdge = numpy.where(res==1)
imgToDisplay = resize(img,numpy.squeeze(crackedEdges).shape)
imgToDisplay-=imgToDisplay.min()
imgToDisplay/=imgToDisplay.max()
for c in range(3):
ic = imgToDisplay[:,:,c]
ic[whereEdge]=(1.0-alpha)*edgeColor[c] + alpha*ic[whereEdge]
if returnImg:
return imgToDisplay
return imshow(imgToDisplay,show=show)
def nestedSegShow(img,labels,edgeColors=None,scale=1,show=False,returnImg=False):
shape=(labels.shape[0]*scale,labels.shape[1]*scale)
if scale!=1:
img=resize(img,shape)
assert numpy.squeeze(labels).ndim==3
nSegs = labels.shape[2]
if edgeColors is None :
edgeColors=numpy.ones([nSegs,4])
a =numpy.array([0,0,0.0,0.6],dtype=numpy.float32)
b =numpy.array([1,0,0,0.4],dtype=numpy.float32)
for s in range(nSegs):
f=float(s)/float(nSegs-1)
edgeColors[s,:]=f*b + (1.0-f)*a
tShape=(img.shape[0]*2-1,img.shape[1]*2-1)
imgToDisplay = resize(img,tShape)
imgToDisplay-=imgToDisplay.min()
imgToDisplay/=imgToDisplay.max()
imgIn = imgToDisplay.copy()
for si in range(nSegs):
l = labels[:,:,si].copy()
if scale!=1:
l=resize(l.astype(numpy.float32),shape,order=0).astype(numpy.uint32)
crackedEdges = analysis.regionImageToCrackEdgeImage(l)
whereEdge = numpy.where(crackedEdges==0)
if len(edgeColors[si])<4:
alpha = 0.0
else:
alpha = edgeColors[si,3]
for c in range(3):
icI = imgIn[:,:,c]
ic = imgToDisplay[:,:,c]
ic[whereEdge]=(1.0-alpha) * edgeColors[si,c] + alpha*icI[whereEdge]
if returnImg:
return imgToDisplay
return imshow(imgToDisplay,show=show)
def show():
import matplotlib.pylab
matplotlib.pylab.show()
# auto-generate code for additional Kernel generators:
def _genKernelFactories(name):
for oldName in dir(eval('filters.'+name)):
if not oldName.startswith('init'):
continue
#remove init from beginning and start with lower case character
newPrefix = oldName[4].lower() + oldName[5:]
if newPrefix == "explicitly":
newPrefix = "explict"
newName = newPrefix + 'Kernel'
if name == 'Kernel2D':
newName += '2D'
code = '''def %(newName)s(*args, **kw):
k = filters.%(name)s()
k.%(oldName)s(*args, **kw)
return k
%(newName)s.__doc__ = filters.%(name)s.%(oldName)s.__doc__
filters.%(newName)s=%(newName)s
''' % {'oldName': oldName, 'newName': newName, 'name': name}
exec(code)
_genKernelFactories('Kernel1D')
_genKernelFactories('Kernel2D')
del _genKernelFactories
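# Minimal usage sketch for the generated kernel factories (illustrative only;
# defined but never called). gaussianDerivativeKernel() is one of the generated
# factories and is also used by gaussianDerivative() earlier in this file; the
# resulting separable kernels are passed to filters.convolve().
def _exampleSeparableConvolution(image, sigma=2.0):
    kx = filters.gaussianDerivativeKernel(sigma, 1)   # derivative kernel for the first axis
    ky = filters.gaussianDerivativeKernel(sigma, 0)   # smoothing kernel for the second axis
    return filters.convolve(image, (kx, ky))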
# define watershedsUnionFind()
def _genWatershedsUnionFind():
def watershedsUnionFind(image, neighborhood=None, out = None):
'''Compute watersheds of an image using the union find algorithm.
If 'neighborhood' is 'None', it defaults to 8-neighborhood for 2D inputs
and 6-neighborhood for 3D inputs.
Calls :func:`watersheds` with parameters::\n\n
watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
'''
if neighborhood is None:
neighborhood = 8 if image.spatialDimensions == 2 else 6
return analysis.watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
watershedsUnionFind.__module__ = 'vigra.analysis'
analysis.watershedsUnionFind = watershedsUnionFind
_genWatershedsUnionFind()
del _genWatershedsUnionFind
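# Minimal usage sketch for watershedsUnionFind() above (illustrative only;
# defined but never called); 'boundaryIndicator' is assumed to be a scalar
# height map, e.g. a gradient magnitude image.
def _exampleWatershedsUnionFind(boundaryIndicator):
    # the neighborhood defaults to 8 for 2D and 6 for 3D inputs (see docstring above)
    return analysis.watershedsUnionFind(boundaryIndicator)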
# define watershedsReoptimization()
def _genWatershedsReoptimization():
def watershedsReoptimization(labels,edgeIndicator,shrinkN,out=None,visu=False):
# do unseeding
#if visu :
# import matplotlib,numpy
# import pylab
# # A random colormap for matplotlib
# cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
# pylab.imshow ( numpy.swapaxes(labels,0,1) , cmap = cmap)
# pylab.show()
seeds=analysis.segToSeeds(labels,int(shrinkN))
if visu :
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
pylab.show()
#if seeds.ndim==2:
# seeds=analysis.labelImageWithBackground(seeds)
#elif seeds.ndim==3:
# seeds=analysis.labelVolumeWithBackground(seeds)
#else :
# raise RuntimeError("only implemented for 2d and 3d")
if visu :
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
pylab.imshow ( numpy.swapaxes(seeds,0,1) , cmap = cmap)
pylab.show()
return analysis.watersheds(edgeIndicator,seeds=seeds,out=out)
watershedsReoptimization.__module__ = 'vigra.analysis'
analysis.watershedsReoptimization = watershedsReoptimization
_genWatershedsReoptimization()
del _genWatershedsReoptimization
# define tensor convenience functions
def _genTensorConvenienceFunctions():
def hessianOfGaussianEigenvalues(image, scale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the Hessian of Gaussian at the given scale
for a scalar image or volume.
Calls :func:`hessianOfGaussian` and :func:`tensorEigenvalues`.
'''
hessian = filters.hessianOfGaussian(image, scale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(hessian, out=out)
hessianOfGaussianEigenvalues.__module__ = 'vigra.filters'
filters.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
def structureTensorEigenvalues(image, innerScale, outerScale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the structure tensor at the given scales
for a scalar or multi-channel image or volume.
Calls :func:`structureTensor` and :func:`tensorEigenvalues`.
'''
st = filters.structureTensor(image, innerScale, outerScale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(st, out=out)
structureTensorEigenvalues.__module__ = 'vigra.filters'
filters.structureTensorEigenvalues = structureTensorEigenvalues
_genTensorConvenienceFunctions()
del _genTensorConvenienceFunctions
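# Minimal usage sketch for the tensor convenience functions above (illustrative
# only; defined but never called); 'image' is assumed to be a scalar 2D or 3D image.
def _exampleTensorEigenvalues(image):
    hessEv = filters.hessianOfGaussianEigenvalues(image, scale=2.0)
    stEv = filters.structureTensorEigenvalues(image, innerScale=1.0, outerScale=2.0)
    return hessEv, stEv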
# define feature convenience functions
def _genFeaturConvenienceFunctions():
def supportedFeatures(array):
'''Return a list of feature names that are available for the given array. These feature
names are the valid inputs to a call of :func:`extractFeatures`. E.g., to compute
just the first two features in the list, use::
f = vigra.analysis.supportedFeatures(array)
print("Computing features:", f[:2])
r = vigra.analysis.extractFeatures(array, features=f[:2])
'''
return analysis.extractFeatures(array, None).supportedFeatures()
supportedFeatures.__module__ = 'vigra.analysis'
analysis.supportedFeatures = supportedFeatures
def supportedRegionFeatures(array, labels):
'''Return a list of feature names that are available for the given array and label array.
These feature names are the valid inputs to a call of
:func:`extractRegionFeatures`. E.g., to compute just the first two features in the
list, use::
f = vigra.analysis.supportedRegionFeatures(array, labels)
print("Computing features:", f[:2])
r = vigra.analysis.extractRegionFeatures(array, labels, features=f[:2])
'''
return analysis.extractRegionFeatures(array, labels, None).supportedFeatures()
supportedRegionFeatures.__module__ = 'vigra.analysis'
analysis.supportedRegionFeatures = supportedRegionFeatures
def supportedConvexHullFeatures(labels):
'''Return a list of Convex Hull feature names that are available for the given 2D label array.
These Convex Hull feature names are the valid inputs to a call of
:func:`extractConvexHullFeatures`. E.g., to compute just the first two features in the
list, use::
f = vigra.analysis.supportedConvexHullFeatures(labels)
print("Computing Convex Hull features:", f[:2])
r = vigra.analysis.extractConvexHullFeatures(labels, features=f[:2])
'''
try:
return analysis.extractConvexHullFeatures(labels, list_features_only=True)
except:
return []
supportedConvexHullFeatures.__module__ = 'vigra.analysis'
analysis.supportedConvexHullFeatures = supportedConvexHullFeatures
def supportedSkeletonFeatures(labels):
'''Return a list of Skeleton feature names that are available for the given 2D label array.
These Skeleton feature names are the valid inputs to a call of
:func:`extractSkeletonFeatures`. E.g., to compute just the first two features in the
list, use::
f = vigra.analysis.supportedSkeletonFeatures(labels)
print("Computing Skeleton features:", f[:2])
r = vigra.analysis.extractSkeletonFeatures(labels, features=f[:2])
'''
try:
return analysis.extractSkeletonFeatures(labels, list_features_only=True)
except:
return []
supportedSkeletonFeatures.__module__ = 'vigra.analysis'
analysis.supportedSkeletonFeatures = supportedSkeletonFeatures
# implement the read-only part of the 'dict' API in FeatureAccumulator and RegionFeatureAccumulator
def __len__(self):
return len(self.keys())
def __iter__(self):
return self.keys().__iter__()
def __contains__(self, key):
try:
return self.isActive(key)
except:
return False
def has_key(self, key):
return self.__contains__(key)
if sys.version_info[0] < 3:
def values(self):
return [self[k] for k in self.keys()]
def items(self):
return [(k, self[k]) for k in self.keys()]
else:
def values(self):
return self.itervalues()
def items(self):
return self.iteritems()
def iterkeys(self):
return self.keys().__iter__()
def itervalues(self):
for k in self.keys():
yield self[k]
def iteritems(self):
for k in self.keys():
yield (k, self[k])
for k in ['__len__', '__iter__', '__contains__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
setattr(analysis.FeatureAccumulator, k, eval(k))
setattr(analysis.RegionFeatureAccumulator, k, eval(k))
_genFeaturConvenienceFunctions()
del _genFeaturConvenienceFunctions
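# Minimal usage sketch for the feature helpers above (illustrative only; defined
# but never called); it mirrors the examples given in the docstrings. 'array' is
# assumed to be a float32 image/volume and 'labels' a matching uint32 label array.
def _exampleFeatureExtraction(array, labels):
    f = analysis.supportedFeatures(array)
    r = analysis.extractFeatures(array, features=f[:2])
    rf = analysis.supportedRegionFeatures(array, labels)
    rr = analysis.extractRegionFeatures(array, labels, features=rf[:2])
    return r, rr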
MetricType = graphs.MetricType
# define grid graph convenience functions
# and extend grid graph classes
def _genGridGraphConvenienceFunctions():
def gridGraph(shape,directNeighborhood=True):
'''Return a grid graph with certain shape.
Parameters:
- shape -- shape of the image
- directNeighborhood -- use 4 (True) or 8 (False) neighborhood (default: True)
Returns:
- grid graph
use::
>>> # 4-connected
>>> g = vigra.graphs.gridGraph(shape=[10,20])
>>> g.nodeNum
200
>>> # 8-connected
>>> g = vigra.graphs.gridGraph(shape=[10,20],directNeighborhood=False)
'''
if(len(shape)==2):
return graphs.GridGraphUndirected2d(shape,directNeighborhood)
elif(len(shape)==3):
return graphs.GridGraphUndirected3d(shape,directNeighborhood)
else:
raise RuntimeError("GridGraph is only implemented for 2d and 3d grids")
gridGraph.__module__ = 'vigra.graphs'
graphs.gridGraph = gridGraph
# extend grid graph via meta classes
for cls in [graphs.GridGraphUndirected2d, graphs.GridGraphUndirected3d] :
metaCls = cls.__class__
class gridGraphInjectorMeta(metaCls):
def __init__(self, name, bases, dict):
for b in bases:
if type(b) not in (self, type):
for k,v in dict.items():
setattr(b,k,v)
return type.__init__(self, name, bases, dict)
class gridGraphInjector(_with_metaclass(gridGraphInjectorMeta, object)):
pass
## inject additional convenience methods into the grid graph classes
class moreGridGraph(gridGraphInjector, cls):
@property
def shape(self):
""" shape of grid graph"""
return self.intrinsicNodeMapShape()
def nodeSize(self):
""" node map filled with 1.0"""
size = graphs.graphMap(self,item='node',dtype=numpy.float32)
size[:]=1
return size
def edgeLengths(self):
""" node map filled with 1.0"""
size = graphs.graphMap(self,item='edge',dtype=numpy.float32)
size[:]=1
return size
def mergeGraph(self):
if len(self.shape)==2:
mg = graphs.GridGraphUndirected2dMergeGraph(self)
else:
mg = graphs.GridGraphUndirected3dMergeGraph(self)
return mg
def isGridGraph(obj):
""" check if obj is gridGraph"""
return isinstance(obj,(graphs.GridGraphUndirected2d , graphs.GridGraphUndirected3d))
def isGridGraph2d(obj):
""" check if obj is gridGraph"""
return isinstance(obj,graphs.GridGraphUndirected2d)
isGridGraph.__module__ = 'vigra.graphs'
graphs.isGridGraph = isGridGraph
isGridGraph2d.__module__ = 'vigra.graphs'
graphs.isGridGraph2d = isGridGraph2d
_genGridGraphConvenienceFunctions()
del _genGridGraphConvenienceFunctions
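# Minimal usage sketch for gridGraph() above (illustrative only; defined but
# never called); it mirrors the docstring example.
def _exampleGridGraph():
    g4 = graphs.gridGraph(shape=[10, 20])                              # 4-connected
    g8 = graphs.gridGraph(shape=[10, 20], directNeighborhood=False)    # 8-connected
    return g4.nodeNum, g8.nodeNum                                      # both graphs have 200 nodes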
def _genGraphConvenienceFunctions():
def listGraph(nodes=0,edges=0):
''' Return an empty directed graph
Parameters :
- nodes : number of nodes to reserve
- edges : number of edges to reserve
Returns :
- graph
'''
return graphs.AdjacencyListGraph(nodes,edges)
listGraph.__module__ = 'vigra.graphs'
graphs.listGraph = listGraph
def intrinsicGraphMapShape(graph,item):
""" Intrinsic shape of node/edge/arc-map for a given graph.
Node, edge and arc maps are stored in numpy arrays by default.
The intrinsic shape must not be confused with the number
of nodes/edges/arcs. The intrinsic shape is used to
allocate a numpy array which can store data for the nodes/edges/arcs
of a given graph.
Parameters:
- graph : input graph to get the shape for
- item : item must be ``'node'`` , ``'edge'`` or ``'arc'``
Returns:
- shape as tuple
"""
if item=='edge':
return graph.intrinsicEdgeMapShape()
elif item=='node':
return graph.intrinsicNodeMapShape()
elif item=='arc':
return graph.intrinsicArcMapShape()
else :
raise RuntimeError("%s is not valid,must be 'edge','node' or 'arc' "%item)
intrinsicGraphMapShape.__module__ = 'vigra.graphs'
graphs.intrinsicGraphMapShape = intrinsicGraphMapShape
def graphMap(graph,item,dtype=numpy.float32,channels=1,addChannelDim=False):
""" Return a graph map for a given graph item (``'node'`` , ``'edge'`` or ``'arc'``).
Parameters:
- graph : graph to get a graph map for
- item : ``'node'`` , ``'edge'`` or ``'arc'``
- dtype : desired dtype
- channels : number of channels (default: 1)
- addChannelDim : add an explicit channel dimension (default: False);
only useful if channels == 1
Returns:
- graphmap as numpy.ndarray / VigraArray
"""
s = intrinsicGraphMapShape(graph,item)
intrDim = len(s)
if(channels==1) and addChannelDim==False:
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'x')
elif intrDim == 2:
return taggedView(a,'xy')
elif intrDim == 3:
return taggedView(a,'xyz')
elif intrDim == 4:
return taggedView(a,'xyzt')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
else:
s = s+(channels,)
a=numpy.zeros(shape=s,dtype=dtype)
if intrDim == 1:
return taggedView(a,'xc')
elif intrDim == 2:
return taggedView(a,'xyc')
elif intrDim == 3:
return taggedView(a,'xyzc')
elif intrDim == 4:
return taggedView(a,'xyztc')
else :
raise RuntimeError("graphs with intrisic dimension >4 are not supported")
graphMap.__module__ = 'vigra.graphs'
graphs.graphMap = graphMap
def mergeGraph(graph):
""" get a merge graph from input graph.
A merge graph might be usefull for hierarchical clustering
"""
#mg = graph.mergeGraph()
mg = graphs.__mergeGraph(graph)
#mg.__base_graph__=graph
return mg
mergeGraph.__module__ = 'vigra.graphs'
graphs.mergeGraph = mergeGraph
INVALID = graphs.Invalid()
graphs.INVALID = INVALID
class ShortestPathPathDijkstra(object):
def __init__(self,graph):
""" shortest path computer
Keyword Arguments:
- graph : input graph
"""
self.pathFinder = graphs._shortestPathDijkstra(graph)
self.graph=graph
self.source = None
self.target = None
def run(self,weights,source,target=None):
""" run shortest path search
Keyword Arguments:
- weights : edge weights encoding the distance between two adjacent nodes
- source : source node
- target : target node (default: None)
If target node is None, the shortest path
to all nodes!=source is computed
"""
self.source = source
self.target = target
if target is None:
self.pathFinder.run(weights,source)
else:
self.pathFinder.run(weights,source,target)
return self
def runIgnoreLargeWeights(self,weights,source,val):
""" run shortest path search, nodes with all edge weights larger than val will be ignored
Keyword Arguments:
- weights : edge weights encoding the distance between two adjacent nodes
- source : source node
- val : upper bound
"""
self.source = source
self.target = None
self.pathFinder.runIgnoreLargeWeights(weights,source,val)
return self
def path(self,target=None,pathType='coordinates'):
""" get the shortest path from source to target
Keyword Arguments:
- weights : edge weights encoding distance from two adjacent nodes
- source : source node
- target : target node (default: None)
If target node is None, the target specified
by 'run' is used.
pathType : 'coordinates' or 'ids' path (default: 'coordinates')
"""
if target is None:
assert self.target is not None
target=self.target
if pathType=='coordinates':
return self.pathFinder.nodeCoordinatePath(target)
elif pathType == 'ids':
return self.pathFinder.nodeIdPath(target)
def distance(self,target=None):
""" get distance from source to target
Keyword Arguments:
- target : target node (default: None)
If target node is None, the target specified
by 'run' is used.
"""
if target is None:
assert self.target is not None
target=self.target
return self.pathFinder.distance(target)
def distances(self,out=None):
""" return the full distance map"""
return self.pathFinder.distances(out)
def predecessors(self,out=None):
""" return the full predecessors map"""
return self.pathFinder.predecessors(out)
ShortestPathPathDijkstra.__module__ = 'vigra.graphs'
graphs.ShortestPathPathDijkstra = ShortestPathPathDijkstra
_genGraphConvenienceFunctions()
del _genGraphConvenienceFunctions
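# Minimal usage sketch for the graph helpers above (illustrative only; defined
# but never called): allocate node and edge maps for a grid graph via graphMap()
# and run Dijkstra on uniform edge weights. The source/target arguments of
# ShortestPathPathDijkstra.run() are graph nodes; obtaining them from
# coordinates is graph-specific and therefore left as parameters here.
def _exampleGraphMapsAndShortestPath(sourceNode, targetNode):
    g = graphs.gridGraph([64, 64])
    edgeWeights = graphs.graphMap(g, 'edge', dtype=numpy.float32)
    edgeWeights[:] = 1.0                                   # uniform edge weights
    pathFinder = graphs.ShortestPathPathDijkstra(g)
    pathFinder.run(edgeWeights, sourceNode, targetNode)
    return pathFinder.path(pathType='coordinates'), pathFinder.distance()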
def _genRegionAdjacencyGraphConvenienceFunctions():
class RegionAdjacencyGraph(graphs.AdjacencyListGraph):
def __init__(self,graph=None ,labels=None ,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" Region adjacency graph
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of edges
Attributes:
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(FIXME: duplicated attribute, see 'labels')
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
"""
if(graph is not None and labels is not None):
super(RegionAdjacencyGraph,self).__init__(int(labels.max()+1),int(reserveEdges))
if ignoreLabel is None and isDense is not None and isDense == True:
ignoreLabel=-1
self.labels = labels
self.ignoreLabel = ignoreLabel
self.baseGraphLabels = labels
self.baseGraph = graph
if maxLabel is None:
maxLabel = int(numpy.max(labels))
# set up rag
self.affiliatedEdges = graphs._regionAdjacencyGraphFast(graph,labels,self,maxLabel,int(reserveEdges))
else:
if ignoreLabel is None:
ignoreLabel=-1
self.labels = labels
self.ignoreLabel = ignoreLabel
self.baseGraphLabels = labels
self.baseGraph = graph
# set up rag
self.affiliatedEdges = graphs._regionAdjacencyGraph(graph,labels,self,self.ignoreLabel)
else :
super(RegionAdjacencyGraph,self).__init__(0,0)
def mergeGraph(self):
return graphs.AdjacencyListGraphMergeGraph(self)
def accumulateSeeds(self, seeds, out=None):
graph = self.baseGraph
labels = self.labels
return graphs._pyAccNodeSeeds(self, graph, labels, seeds, out)
def accumulateEdgeFeatures(self,edgeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- edgeFeatures : edge features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated edge map
Returns :
accumulated edge features
"""
graph = self.baseGraph
affiliatedEdges = self.affiliatedEdges
if isinstance(edgeFeatures, (graphs.ImplicitMEanEdgeMap_2d_float_float, graphs.ImplicitMEanEdgeMap_3d_float_float)):
if graphs.isGridGraph(graph)==False:
raise RuntimeError("implicit edge maps are only implemented for grid graphs")
return graphs._ragEdgeFeatures(self, graph, affiliatedEdges, edgeFeatures,acc, out)
else:
if self.edgeNum == 0:
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
if acc == 'mean':
weights = self.baseGraph.edgeLengths()
#print("Weights",weights)
else:
weights = graphs.graphMap(self.baseGraph,'edge',dtype=numpy.float32)
weights[:] = 1
if graphs.isGridGraph2d(graph) and edgeFeatures.ndim == 4 :
return graphs._ragEdgeFeaturesMb(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
else:
return graphs._ragEdgeFeatures(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
def accumulateEdgeStatistics(self, edgeFeatures, out=None):
if not isinstance(self, RegionAdjacencyGraph):
raise AttributeError("accumulateEdgeFeaturesNew not implemented for " + type(self))
graph = self.baseGraph
affiliatedEdges = self.affiliatedEdges
out = graphs._ragEdgeStatistics(self, graph, affiliatedEdges, edgeFeatures, out)
return out
def accumulateNodeFeatures(self,nodeFeatures,acc='mean',out=None):
""" accumulate edge features from base graphs edges features
Keyword Argument:
- nodeFeatures : node features of baseGraph
- acc : used accumulator (default: 'mean')
Currently only 'mean' and 'sum' are implemented
- out : preallocated node map (default: None)
Returns :
accumulated node features
"""
if self.edgeNum == 0 :
raise RuntimeError("self.edgeNum == 0 => cannot accumulate edge features")
graph = self.baseGraph
labels = self.baseGraphLabels
ignoreLabel = self.ignoreLabel
if acc == 'mean':
#print("get node size...")
weights = self.baseGraph.nodeSize()
#print("weights == ", weights)
else :
weights = graphs.graphMap(self.baseGraph,'node',dtype=numpy.float32)
weights[:]=1
return graphs._ragNodeFeatures(self,graph,labels,nodeFeatures,weights,acc,ignoreLabel,out)
def projectNodeFeatureToBaseGraph(self,features,out=None):
""" project node features from this graph, to the base graph of this graph.
Keyword Arguments:
- features : node feautres for this graph
- out : preallocated node map of baseGraph (default: None)
Returns :
projected node features of base graph
"""
out=graphs._ragProjectNodeFeaturesToBaseGraph(
rag=self,
baseGraph=self.baseGraph,
baseGraphLabels=numpy.squeeze(self.baseGraphLabels),
ragNodeFeatures=features,
ignoreLabel=self.ignoreLabel,
out=out
)
#print("out",out.shape,out.dtype)
return out
def projectLabelsBack(self,steps,labels=None,_current=0):
""" project labels from current graph to baseGraph and repeat this recursively
Keyword Arguments:
- steps : how often should the labels be projected back
- labels : labels for the current graph (default: None)
If labels is None, each node gets its own label
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if steps == _current :
return labels
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsBack(steps,labels,_current+1)
def projectLabelsToBaseGraph(self,labels=None):
""" project node labels from this graph, to the base graph of this graph.
Keyword Arguments:
- labels : node labels for this graph (default: None)
If labels is None, each node gets its own label
- out : preallocated node map of baseGraph (default: None)
Returns :
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
return self.projectNodeFeatureToBaseGraph(features=labels)
def projectBaseGraphGt(self, baseGraphGt, gt=None, gtQuality=None):
bggt = numpy.require(baseGraphGt,dtype=numpy.uint32)
gt, gtQuality = graphs._ragProjectGroundTruth(rag=self, graph=self.baseGraph,
labels=self.baseGraphLabels, gt=bggt,
ragGt=gt, ragGtQuality=gtQuality)
return gt, gtQuality
def edgeUVCoordinates(self, edgeId):
try :
ei = int(edgeId)
except:
ei = edgeId.id
affEdges = self.affiliatedEdges
uvCoords = affEdges.getUVCoordinates(self.baseGraph, ei)
dim = uvCoords.shape[1] // 2
uCoords = uvCoords[:,0:dim]
vCoords = uvCoords[:,dim:2*dim]
return (uCoords,vCoords)
def edgeTopologicalCoordinates(self, edgeId):
uc,vc = self.edgeUVCoordinates(edgeId)
return uc+vc
def edgeCoordinates(self, edgeId):
uc,vc = self.edgeUVCoordinates(edgeId)
return (uc+vc)/2.0
RegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.RegionAdjacencyGraph = RegionAdjacencyGraph
class GridRegionAdjacencyGraph(graphs.RegionAdjacencyGraph):
def __init__(self,graph=None,labels=None,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" Grid Region adjacency graph
A region adjacency graph where the base graph should be
a grid graph or a GridRegionAdjacencyGraph.
Keyword Arguments :
- graph : the base graph, the region adjacency graph should be based on
- labels : label map for the graph
- ignoreLabel : ignore a label in the labels map (default: None)
- reserveEdges : reserve a certain number of edges
Attributes :
- labels : labels passed in constructor
- ignoreLabel : ignoreLabel passed in constructor
- baseGraphLabels : labels passed in constructor
(FIXME: duplicated attribute, see 'labels')
- baseGraph : baseGraph is the graph passed in constructor
- affiliatedEdges : for each edge in the region adjacency graph,
a vector of edges of the baseGraph is stored in affiliatedEdges
- shape : shape of the grid graph which is a base graph in the
complete graph chain.
"""
if graph is not None and labels is not None:
if not (graphs.isGridGraph(graph) or isinstance(graph,GridRegionAdjacencyGraph)):
raise RuntimeError("graph must be a GridGraph or a GridRegionAdjacencyGraph")
super(GridRegionAdjacencyGraph, self).__init__(graph, labels, ignoreLabel, reserveEdges, maxLabel, isDense)
else:
super(GridRegionAdjacencyGraph, self).__init__()
@property
def shape(self):
""" shape of the underlying grid graph"""
return self.baseGraph.shape
def projectLabelsToGridGraph(self,labels=None):
"""project labels of this graph to the underlying grid graph.
Keyword Arguments :
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
Returns :
grid graph labeling
"""
if labels is None :
# identity segmentation on this level
labels = self.nodeIdMap()
if graphs.isGridGraph(self.baseGraph):
return self.projectLabelsToBaseGraph(labels)
else :
labels = self.projectLabelsToBaseGraph(labels)
return self.baseGraph.projectLabelsToGridGraph(labels)
def projectNodeFeaturesToGridGraph(self,features):
""" project features of this graph to the underlying grid graph.
Therefore project the features to an image.
Keyword Arguments :
- features : nodeFeatures of the current graph
Returns :
grid graph labeling
"""
if graphs.isGridGraph(self.baseGraph):
return self.projectNodeFeatureToBaseGraph(features)
else :
features = self.projectNodeFeatureToBaseGraph(features)
return self.baseGraph.projectNodeFeaturesToGridGraph(features)
def showNested(self,img,labels=None,returnImg=False):
""" show the complet graph chain / hierarchy given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
"""
ll=[]
if labels is not None:
ll.append( self.projectLabelsToGridGraph(labels) )
ll.append( self.projectLabelsToGridGraph() )
g=self.baseGraph
while graphs.isGridGraph(g)==False:
ll.append( g.projectLabelsToGridGraph() )
g=g.baseGraph
ll.reverse()
gridLabels = [l[...,numpy.newaxis] for l in ll ]
gridLabels = numpy.concatenate(gridLabels,axis=2)
return nestedSegShow(img,gridLabels,returnImg=returnImg)
def show(self,img,labels=None,edgeColor=(0,0,0),alpha=0.3,returnImg=False):
""" show the graph given an RGB image
Keyword Arguments:
- img : RGB image
- labels : node labeling of this graph (default: None)
If labels is None, each node gets its own label
- edgeColor : RGB tuple of edge color (default: (0,0,0) ).
Do not use values bigger than 1 in edgeColor.
- alpha : make edges semi-transparent (default: 0.3).
0 means no transparency, 1 means full transparency.
"""
pLabels = self.projectLabelsToGridGraph(labels)
return segShow(img,numpy.squeeze(pLabels),edgeColor=edgeColor,alpha=alpha,returnImg=returnImg)
def showEdgeFeature(self, img, edgeFeature, cmap='jet', returnImg=False, labelMode=False):
import matplotlib
assert graphs.isGridGraph(self.baseGraph)
imgOut = img.copy().squeeze()
if imgOut.ndim == 2:
imgOut = numpy.concatenate([imgOut[:,:,None]]*3,axis=2)
imgOut = taggedView(imgOut,'xyc')
imgOut-=imgOut.min()
imgOut/=imgOut.max()
if not labelMode:
edgeFeatureShow = edgeFeature.copy()
mi = edgeFeatureShow.min()
ma = edgeFeatureShow.max()
cm = matplotlib.cm.ScalarMappable(cmap=cmap)
rgb = cm.to_rgba(edgeFeatureShow)[:,0:3]
print(rgb.shape)
if(ma > mi):
edgeFeatureShow -=mi
edgeFeatureShow /= edgeFeatureShow.max()
else:
edgeFeatureShow[:] = 1
for e in self.edgeIter():
u,v = self.edgeUVCoordinates(e.id)
if not labelMode:
showVal = rgb[e.id,:]
else:
if edgeFeature[e.id] == 0:
showVal=[0,0,1]
elif edgeFeature[e.id] == 1:
showVal=[0,1,0]
elif edgeFeature[e.id] == -1:
showVal=[1,0,0]
imgOut[u[:,0],u[:,1],:] = showVal
imgOut[v[:,0],v[:,1],:] = showVal
#print(u.shape)
if returnImg:
return imgOut
imshow(imgOut)
def nodeSize(self):
""" get the geometric size of the nodes """
if graphs.isGridGraph(self.baseGraph):
return graphs._ragNodeSize(self, self.baseGraph, self.labels, self.ignoreLabel)
else:
baseNodeSizes = self.baseGraph.nodeSize()
return self.accumulateNodeFeatures(baseNodeSizes,acc='sum')
def edgeLengths(self):
""" get the geometric length of the edges"""
if graphs.isGridGraph(self.baseGraph):
return graphs._ragEdgeSize(self,self.affiliatedEdges)
else:
baseEdgeLengths = self.baseGraph.edgeLengths()
return self.accumulateEdgeFeatures(baseEdgeLengths,acc='sum')
def writeHDF5(self, filename, dset):
if(graphs.isGridGraph(self.baseGraph)):
sGraph = self.serialize()
sAffEdges = graphs._serialzieGridGraphAffiliatedEdges(self.baseGraph, self, self.affiliatedEdges )
sLabels = self.labels
writeHDF5(numpy.array([self.ignoreLabel]), filename, dset+'/ignore_label')
writeHDF5(sLabels, filename, dset+'/labels')
writeHDF5(sGraph, filename, dset+'/graph')
writeHDF5(sAffEdges, filename, dset+'/affiliated_edges')
else:
raise RuntimeError("only RAGs of Grid graph can be serialized")
#def readHdf5(self, filename, dset):
# labels = readHdf5(filename, dset+'/labels')
# shape = labels.shape
# self.baseGraph = graphs.gridGraph(shape)
GridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.GridRegionAdjacencyGraph = GridRegionAdjacencyGraph
class TinyEdgeLabelGui(object):
def __init__(self, rag, img, edgeLabels = None, labelMode=True):
if labelMode and isinstance(edgeLabels, numpy.ndarray):
assert set(numpy.unique(edgeLabels)).issubset({-1, 0, 1}), 'if labelMode is true only label values of [-1, 0, 1] are permitted'
self.press = None
self.rag = rag
self.img = img
self.edgeLabels = edgeLabels
self.dim = len(img.shape)
self.zOffset = 0
self.edgeRag2dToRag = None
self.edgeRagToRag2d = None
if self.dim == 3:
self.zOffset = self.img.shape[2]//2
self.visuImg = numpy.array(img, dtype=numpy.float32)
self.visuImg -= self.visuImg.min()
self.visuImg /= self.visuImg.max()
self.rag2d = None
self.visuImg2d = None
self.labelMode = labelMode
if self.edgeLabels is None :
self.edgeLabels = numpy.zeros(self.rag.edgeNum, dtype=numpy.float32)
self.edgeLabels2d = None
self.slice2d()
self.implot = None
self.currentLabel = 1
self.brushSize = 1
def startGui(self):
from functools import partial
import pylab as plt
from matplotlib.widgets import Slider, Button, RadioButtons
ax = plt.gca()
fig = plt.gcf()
imgWithEdges =self.rag2d.showEdgeFeature(self.visuImg2d, self.edgeLabels2d, returnImg=True, labelMode=self.labelMode)
self.implot = ax.imshow(numpy.swapaxes(imgWithEdges,0,1))
ff = partial(self.onclick, self)
cid = fig.canvas.mpl_connect('button_press_event', self.onclick)
fig.canvas.mpl_connect('key_press_event', self.press_event)
fig.canvas.mpl_connect('scroll_event', self.scroll)
fig.canvas.mpl_connect('motion_notify_event', self.on_motion)
fig.canvas.mpl_connect('button_release_event', self.on_release)
if self.labelMode:
axcolor = 'lightgoldenrodyellow'
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
self.slideBrush = Slider(axamp, 'brush-size', 1, 20.0, valinit=2)
self.slideBrush.on_changed(self.updateBrushSize)
plt.show()
def updateBrushSize(self, val):
self.brushSize = int(val+0.5)
def press_event(self, event):
sys.stdout.flush()
if event.key=='0' or event.key=='3':
self.currentLabel = 0
if event.key=='1':
self.currentLabel = 1
if event.key=='2':
self.currentLabel = -1
def slice2d(self):
if self.dim==3:
labels = self.rag.labels[:,:,self.zOffset].squeeze()
gg = graphs.gridGraph(labels.shape)
self.rag2d = graphs.regionAdjacencyGraph(gg, labels)
# update edges 2d:
self.edgeLabels2d = numpy.zeros(self.rag2d.edgeNum, dtype=numpy.float32)
# update edge correlation
self.edgeIdRag2dToRag = dict()
self.edgeIdRagToRag2d = dict()
for edge in self.rag2d.edgeIter():
edge3d = self.rag.findEdge(edge.u, edge.v)
self.edgeIdRag2dToRag[edge.id] = edge3d.id
self.edgeIdRagToRag2d[edge3d.id] = edge.id
self.visuImg2d = self.visuImg[:,:,self.zOffset]
# update edge 2d status:
for i in numpy.arange(self.edgeLabels2d.shape[0]):
self.edgeLabels2d[i] = self.edgeLabels[self.edgeIdRag2dToRag[i]]
elif self.dim==2:
self.rag2d = self.rag
self.visuImg2d = self.visuImg
self.edgeIdRag2dToRag = dict()
for edge in self.rag.edgeIter():
self.edgeIdRag2dToRag[edge.id] = edge.id
self.edgeIdRagToRag2d = self.edgeIdRag2dToRag
self.edgeLabels2d = self.edgeLabels
else:
print('warning: bad dimension!')
def scroll(self, event):
import pylab as plt
if self.dim==3:
if event.button == 'up':
self.zOffset += 1
else:
self.zOffset -= 1
self.zOffset = self.zOffset % self.visuImg.shape[2]
self.slice2d()
imgWithEdges = self.rag2d.showEdgeFeature(self.visuImg2d, self.edgeLabels2d,returnImg=True, labelMode=self.labelMode)
self.implot.set_data(numpy.swapaxes(imgWithEdges,0,1))
plt.draw()
def on_motion(self, event):
if self.press is None:
return
print(event.xdata, event.ydata)
self.handle_click(event)
def on_release(self, event):
self.press = None
def onclick(self, event):
self.press = event.xdata, event.ydata
print(event.xdata, event.ydata)
try:
self.handle_click(event)
except:
pass
def handle_click(self, event):
import pylab as plt
if event.button==1:
self.currentLabel = 1
if event.button==2:
self.currentLabel = 0
if event.button==3:
self.currentLabel = -1
img = self.img
rag = self.rag2d
labels = rag.baseGraphLabels
shape = img.shape
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = int(math.floor(event.xdata)),int(math.floor(event.ydata))
#print("X,Y",x,y)
l = labels[x,y]
others = []
bs = self.brushSize
for xo in range(-1*bs, bs+1):
for yo in range(-1*bs, bs+1):
xx = x+xo
yy = y+yo
if xo != 0 or yo != 0:
if xx >=0 and xx<shape[0] and \
yy >=0 and yy<shape[1]:
otherLabel = labels[xx, yy]
if l != otherLabel:
edge = rag.findEdge(int(l), int(otherLabel))
#print(edge)
others.append((xx,yy,edge))
#break
#if other is not None:
# pass
if self.labelMode:
for other in others:
eid = other[2].id
oldLabel = self.edgeLabels[self.edgeIdRag2dToRag[eid]]
if self.currentLabel == oldLabel:
newLabel = oldLabel
else:
newLabel = self.currentLabel
self.edgeLabels[self.edgeIdRag2dToRag[eid]] = newLabel
self.edgeLabels2d[eid] = newLabel
imgWithEdges = rag.showEdgeFeature(self.visuImg2d, self.edgeLabels2d,returnImg=True, labelMode=self.labelMode)
self.implot.set_data(numpy.swapaxes(imgWithEdges,0,1))
plt.draw()
TinyEdgeLabelGui.__module__ = 'vigra.graphs'
graphs.TinyEdgeLabelGui = TinyEdgeLabelGui
def loadGridRagHDF5(filename , dset):
#print("load labels and make grid graph")
labels = readHDF5(filename, dset+'/labels')
shape = labels.shape
gridGraph = graphs.gridGraph(shape)
#print(gridGraph)
#print("load graph serialization")
graphSerialization = readHDF5(filename, dset+'/graph')
#print("make empty grid rag")
gridRag = GridRegionAdjacencyGraph()
#print("deserialize")
gridRag.deserialize(graphSerialization)
#print("load affiliatedEdges")
affEdgeSerialization = readHDF5(filename, dset+'/affiliated_edges')
#print("deserialize")
affiliatedEdges = graphs._deserialzieGridGraphAffiliatedEdges(gridGraph, gridRag, affEdgeSerialization)
ignoreLabel = readHDF5(filename, dset+'/ignore_label')
gridRag.affiliatedEdges = affiliatedEdges
gridRag.labels = taggedView(labels,"xyz")
gridRag.ignoreLabel = int(ignoreLabel[0])
gridRag.baseGraphLabels = taggedView(labels,"xyz")
gridRag.baseGraph = gridGraph
return gridRag
loadGridRagHDF5.__module__ = 'vigra.graphs'
graphs.loadGridRagHDF5 = loadGridRagHDF5
def regionAdjacencyGraph(graph,labels,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" Return a region adjacency graph for a labeld graph.
Parameters:
- graph -- input graph
- lables -- node-map with labels for each nodeSumWeights
- ignoreLabel -- label to ingnore (default: None)
- reserveEdges -- reverse a certain number of edges (default: 0)
Returns:
- rag -- instance of RegionAdjacencyGraph or GridRegionAdjacencyGraph
If graph is a GridGraph or a GridRegionAdjacencyGraph, a GridRegionAdjacencyGraph
will be returned.
Otherwise a RegionAdjacencyGraph will be returned
"""
if isinstance(graph , graphs.GridRegionAdjacencyGraph) or graphs.isGridGraph(graph):
return GridRegionAdjacencyGraph(graph=graph, labels=labels, ignoreLabel=ignoreLabel,
reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
else:
return RegionAdjacencyGraph(graph=graph, labels=labels, ignoreLabel=ignoreLabel,
reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
regionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.regionAdjacencyGraph = regionAdjacencyGraph
def gridRegionAdjacencyGraph(labels,ignoreLabel=None,reserveEdges=0, maxLabel=None, isDense=None):
""" get a region adjacency graph and a grid graph from a labeling.
This function will call 'graphs.gridGraph' and 'graphs.regionAdjacencyGraph'
Keyword Arguments:
- labels : label image
- ignoreLabel : label to ignore (default: None)
- reserveEdges : reserve a number of edges (default: 0)
"""
_gridGraph=graphs.gridGraph(numpy.squeeze(labels).shape)
rag=graphs.regionAdjacencyGraph(graph=_gridGraph, labels=labels, ignoreLabel=ignoreLabel,
reserveEdges=reserveEdges, maxLabel=maxLabel, isDense=isDense)
return _gridGraph, rag
gridRegionAdjacencyGraph.__module__ = 'vigra.graphs'
graphs.gridRegionAdjacencyGraph = gridRegionAdjacencyGraph
_genRegionAdjacencyGraphConvenienceFunctions()
del _genRegionAdjacencyGraphConvenienceFunctions
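# Minimal usage sketch for the region adjacency graph helpers above
# (illustrative only; defined but never called); 'labels' is assumed to be a
# 2D/3D label image, e.g. an over-segmentation from a watershed, and 'image'
# a scalar image whose shape matches the label image.
def _exampleGridRag(labels, image):
    gg, rag = graphs.gridRegionAdjacencyGraph(labels)
    # mean image value per region: accumulate grid-graph node features onto the RAG nodes
    meanPerRegion = rag.accumulateNodeFeatures(image)
    return rag.nodeNum, rag.edgeNum, meanPerRegion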
def _genGraphSegmentationFunctions():
def getNodeSizes(graph):
""" get size of nodes:
This function will try to call 'graph.nodeSize()'.
If this fails, a node map filled with 1.0 will be
returned
Keyword Arguments:
- graph : input graph
"""
try:
return graph.nodeSize()
except:
size = graphs.graphMap(graph,'node',dtype=numpy.float32)
size[:]=1
return size
getNodeSizes.__module__ = 'vigra.graphs'
graphs.getNodeSizes = getNodeSizes
def getEdgeLengths(graph):
""" get lengths/sizes of edges:
This function will try to call 'graph.edgeLengths()'.
If this fails, an edge map filled with 1.0 will be
returned
Keyword Arguments:
- graph : input graph
"""
try:
return graph.edgeLengths()
except:
size = graphs.graphMap(graph,'edge',dtype=numpy.float32)
size[:]=1
return size
getEdgeLengths.__module__ = 'vigra.graphs'
graphs.getEdgeLengths = getEdgeLengths
def felzenszwalbSegmentation(graph,edgeWeights,nodeSizes=None,k=1.0,nodeNumStop=None,out=None):
""" felzenszwalbs segmentation method
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators
- nodeSizes : size of each node (default: None)
If nodeSizes is None, 'getNodeSizes' will be called
- k : free parameter of Felzenszwalb's algorithm (default : 1.0)
- nodeNumStop : stop the agglomeration at a given nodeNum (default : None)
If nodeNumStop is None, the resulting number of nodes depends on k.
- out : preallocated node map for the result (default : None)
"""
if nodeNumStop is None :
nodeNumStop=-1
if nodeSizes is None :
nodeSizes=graphs.getNodeSizes(graph)
return graphs._felzenszwalbSegmentation(graph=graph,edgeWeights=edgeWeights,nodeSizes=nodeSizes,
k=k,nodeNumStop=nodeNumStop,out=out)
felzenszwalbSegmentation.__module__ = 'vigra.graphs'
graphs.felzenszwalbSegmentation = felzenszwalbSegmentation
def edgeWeightedWatersheds(graph,edgeWeights,seeds,backgroundLabel=None,backgroundBias=None,out=None):
""" edge weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : evaluation weights
- seeds : node map with seeds.
At least one node must have a nonzero seed
- backgroundLabel : a specific backgroundLabel (default : None)
- backgroundBias : backgroundBias (default : None)
"""
if backgroundLabel is None and backgroundBias is None:
return graphs._edgeWeightedWatershedsSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
out=out)
else :
if backgroundLabel is None or backgroundBias is None:
raise RuntimeError("if backgroundLabel or backgroundBias is not None, the other must also be not None")
return graphs._carvingSegmentation(graph=graph,edgeWeights=edgeWeights,seeds=seeds,
backgroundLabel=backgroundLabel,backgroundBias=backgroundBias,out=out)
edgeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.edgeWeightedWatersheds = edgeWeightedWatersheds
def nodeWeightedWatershedsSeeds(graph,nodeWeights,out=None):
""" generate watersheds seeds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map
- out : seed map
"""
return graphs._nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights,out=out)
nodeWeightedWatershedsSeeds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatershedsSeeds = nodeWeightedWatershedsSeeds
def shortestPathSegmentation(graph, edgeWeights, nodeWeights, seeds=None, out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weight map
- nodeWeights : node weight map
- seeds : node map with seeds (default: None)
If seeds are None, 'nodeWeightedWatershedsSeeds' will be called
"""
if seeds is None:
seeds = graphs.nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights)
return graphs._shortestPathSegmentation(graph=graph, edgeWeights=edgeWeights, nodeWeights=nodeWeights,
seeds=seeds, out=out)
shortestPathSegmentation.__module__ = 'vigra.graphs'
graphs.shortestPathSegmentation = shortestPathSegmentation
def nodeWeightedWatersheds(graph,nodeWeights,seeds=None,method='regionGrowing',out=None):
""" node weighted seeded watersheds
Keyword Arguments :
- graph : input graph
- nodeWeights : node height map / evaluation weights
- seeds : node map with seeds (default: None)
If seeds are None, 'nodeWeightedWatershedsSeeds' will be called
"""
if seeds is None:
seeds = graphs.nodeWeightedWatershedsSeeds(graph=graph,nodeWeights=nodeWeights)
if method!='regionGrowing':
raise RuntimeError("currently only 'regionGrowing' is supported")
return graphs._nodeWeightedWatershedsSegmentation(graph=graph,nodeWeights=nodeWeights,seeds=seeds,method=method,out=out)
nodeWeightedWatersheds.__module__ = 'vigra.graphs'
graphs.nodeWeightedWatersheds = nodeWeightedWatersheds
def seededSegmentation(graph, nodeMap=None, edgeMap=None, seeds=None, alg='ws',out=None,**kwargs):
"""
alg:
- 'ws' watershed
- 'sp' shortest path
- 'crf' crf/mrf method
- 'hc' hierarchical-clustering method
"""
if alg == 'ws':
# "default" node weighted watershed
if nodeMap is not None and edgeMap is None:
seg = graphs.nodeWeightedWatersheds(graph=graph,
nodeWeights=nodeMap,
seeds=seeds,out=out)
# edge weighted watershed
elif nodeMap is None and edgeMap is not None:
seg = graphs.edgeWeightedWatersheds(graph=graph,
edgeWeights=edgeMap,
seeds=seeds,out=out)
# hybrid (not yet implemented)
elif nodeMap is not None and edgeMap is not None:
raise RuntimeError("Not Yet Implemented")
else :
# error
raise RuntimeError("error")
elif alg == 'sp':
# "default" shortest path
if nodeMap is None and edgeMap is None:
raise RuntimeError("Not Yet Implemented")
elif nodeMap is not None or edgeMap is not None:
if nodeMap is None:
nodeMap = graphs.graphMap(graph,'node',dtype='float32')
nodeMap[:] = 0
if edgeMap is None:
edgeMap = graphs.graphMap(graph,'edge',dtype='float32')
edgeMap[:] = 0
seg = graphs.shortestPathSegmentation(graph=graph,
edgeWeights=edgeMap,
nodeWeights=nodeMap,
seeds=seeds,out=out)
else :
# error
raise RuntimeError("error")
elif alg == 'crf':
raise RuntimeError("Not Yet Implemented")
else:
raise RuntimeError("seededSegmentation(): unknown algorithm '%s'" % alg)
return seg
seededSegmentation.__module__ = 'vigra.graphs'
graphs.seededSegmentation = seededSegmentation
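# Minimal usage sketch for seededSegmentation() above (illustrative only;
# defined but never called); 'heightMap' is assumed to be a scalar 2D
# boundary-indicator image whose shape matches the grid graph, and 'seeds' a
# matching node map with at least one nonzero entry (pass seeds=None to let
# the helpers generate seeds automatically).
def _exampleSeededSegmentation(heightMap, seeds=None):
    gg = graphs.gridGraph(heightMap.shape)
    nodeWeights = graphs.graphMap(gg, 'node', dtype=numpy.float32)
    nodeWeights[:] = heightMap                 # node heights drive the watershed
    # with only a node map given, alg='ws' dispatches to nodeWeightedWatersheds()
    return graphs.seededSegmentation(gg, nodeMap=nodeWeights, seeds=seeds, alg='ws')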
def wsDtSegmentation(pmap, pmin, minMembraneSize, minSegmentSize, sigmaMinima, sigmaWeights, cleanCloseSeeds=True):
"""A probability map 'pmap' is provided and thresholded using pmin.
This results in a mask. Every connected component which has fewer pixels
than 'minMembraneSize' is deleted from the mask. The mask is used to
calculate the signed distance transformation.
From this distance transformation the segmentation is computed using
a seeded watershed algorithm. The seeds are placed on the local maxima
of the distanceTrafo after smoothing with 'sigmaMinima'.
The weights of the watershed are defined by the inverse of the signed
distance transform smoothed with 'sigmaWeights'.
'minSegmentSize' determines how small the smallest segment in the final
segmentation is allowed to be. If there are smaller ones the corresponding
seeds are deleted and the watershed is done again.
If 'cleanCloseSeeds' is True, multiple seed points that are clearly in the
same neuron will be merged with a heuristic that ensures that no seeds of
two different neurons are merged.
"""
def cdist(xy1, xy2):
# influenced by: http://stackoverflow.com/a/1871630
d = numpy.zeros((xy1.shape[1], xy1.shape[0], xy1.shape[0]))
for i in numpy.arange(xy1.shape[1]):
d[i,:,:] = numpy.square(numpy.subtract.outer(xy1[:,i], xy2[:,i]))
d = numpy.sum(d, axis=0)
return numpy.sqrt(d)
def findBestSeedCloserThanMembrane(seeds, distances, distanceTrafo, membraneDistance):
""" finds the best seed of the given seeds, that is the seed with the highest value distance transformation."""
closeSeeds = distances <= membraneDistance
numpy.zeros_like(closeSeeds)
# iterate over all close seeds
maximumDistance = -numpy.inf
mostCentralSeed = None
for seed in seeds[closeSeeds]:
if distanceTrafo[seed[0], seed[1], seed[2]] > maximumDistance:
maximumDistance = distanceTrafo[seed[0], seed[1], seed[2]]
mostCentralSeed = seed
return mostCentralSeed
def nonMaximumSuppressionSeeds(seeds, distanceTrafo):
""" removes all seeds that have a neigbour that is closer than the the next membrane
seeds is a list of all seeds, distanceTrafo is array-like
return is a list of all seeds that are relevant.
works only for 3d
"""
seedsCleaned = set()
# calculate the distances from each seed to the next seeds.
distances = cdist(seeds, seeds)
for i in numpy.arange(len(seeds)):
membraneDistance = distanceTrafo[seeds[i,0], seeds[i,1], seeds[i,2]]
bestAlternative = findBestSeedCloserThanMembrane(seeds, distances[i,:], distanceTrafo, membraneDistance)
seedsCleaned.add(tuple(bestAlternative))
return numpy.array(list(seedsCleaned))
def volumeToListOfPoints(seedsVolume, threshold=0.):
return numpy.array(numpy.where(seedsVolume > threshold)).transpose()
def placePointsInVolumen(points, shape):
volumen = numpy.zeros(shape)
points = numpy.maximum(points, numpy.array((0, 0, 0)))
points = numpy.minimum(points, numpy.array(shape) - 1)
for point in (numpy.floor(points)).astype(int):
volumen[point[0], point[1], point[2]] = 1
return volumen
# get the thresholded pmap
binary = numpy.zeros_like(pmap, dtype=numpy.uint32)
binary[pmap >= pmin] = 1
# delete small CCs
labeled = analysis.labelVolumeWithBackground(binary)
analysis.sizeFilterSegInplace(labeled, int(numpy.max(labeled)), int(minMembraneSize), checkAtBorder=True)
# use cleaned binary image as mask
mask = numpy.zeros_like(binary, dtype = numpy.float32)
mask[labeled > 0] = 1.
# perform signed dt on mask
dt = filters.distanceTransform3D(mask)
dtInv = filters.distanceTransform3D(mask, background=False)
dtInv[dtInv>0] -= 1
dtSigned = dt.max() - dt + dtInv
dtSignedSmoothMinima = filters.gaussianSmoothing(dtSigned, sigmaMinima)
dtSignedSmoothWeights = filters.gaussianSmoothing(dtSigned, sigmaWeights)
seeds = analysis.localMinima3D(dtSignedSmoothMinima, neighborhood=26, allowAtBorder=True)
if cleanCloseSeeds:
seeds = nonMaximumSuppressionSeeds(volumeToListOfPoints(seeds), dt)
seeds = placePointsInVolumen(seeds, mask.shape).astype(numpy.uint32)
seedsLabeled = analysis.labelVolumeWithBackground(seeds)
segmentation = analysis.watershedsNew(dtSignedSmoothWeights, seeds = seedsLabeled, neighborhood=26)[0]
analysis.sizeFilterSegInplace(segmentation, int(numpy.max(segmentation)), int(minSegmentSize), checkAtBorder=True)
segmentation = analysis.watershedsNew(dtSignedSmoothWeights, seeds = segmentation, neighborhood=26)[0]
return segmentation
wsDtSegmentation.__module__ = 'vigra.analysis'
analysis.wsDtSegmentation = wsDtSegmentation
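# A minimal usage sketch for the function registered above; 'pmap3d' is a
# hypothetical 3D float32 membrane probability volume and the parameter
# values are illustrative only:
#
#   seg = vigra.analysis.wsDtSegmentation(pmap3d, pmin=0.5,
#                                          minMembraneSize=10,
#                                          minSegmentSize=100,
#                                          sigmaMinima=3.0, sigmaWeights=1.6)
#   # 'seg' is a label volume produced by the second watershed pass.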
def agglomerativeClustering(graph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,
nodeLabels=None,nodeNumStop=None,beta=0.5,metric='l1',wardness=1.0,out=None):
""" Agglomerative hierarchical clustering.
Keyword Arguments :
- graph : input graph
- edgeWeights : edge weights / indicators (default : None)
- edgeLengths : length / weight of each edge (default : None)
Since we do weighted mean agglomeration, a length/weight
is needed for each edge to merge 2 edges w.r.t. weighted mean.
If no edgeLengths is given, 'getEdgeLengths' is called.
- nodeFeatures : a feature vector for each node (default: None)
A feature vector as RGB values,or a histogram for each node.
Within the agglomeration, an additional edge weight will be
computed from the "difference" between the features of two adjacent nodes.
The metric specified in the keyword 'metric' is used to compute this
difference
- nodeSizes : size / weight of each node (default : None)
Since we do weighted mean agglomeration, a size / weight
is needed for each node to merge 2 edges w.r.t. weighted mean.
If no nodeSizes is given, 'getNodeSizes' is called.
- nodeNumStop : stop the agglomeration at a given nodeNum (default : graph.nodeNum/2)
- beta : weight between the given edgeWeights and the edge weights derived from nodeFeatures (default : 0.5):
0.0 means only the given edgeWeights are used and 1.0 means only the edge weights
computed from the nodeFeatures differences are used
- metric : metric used to compute node feature difference (default : 'l1')
- wardness : 0.0 means do not apply Ward's criterion, 1.0 means fully apply Ward's criterion (default : 1.0)
- out : preallocated nodeMap for the resulting labeling (default : None)
Returns:
A node label map encoding the segmentation
"""
assert edgeWeights is not None or nodeFeatures is not None
print("prepare ")
if nodeNumStop is None:
nodeNumStop = max(graph.nodeNum//2,min(graph.nodeNum,2))
if edgeLengths is None :
print("get edge length")
edgeLengths = graphs.getEdgeLengths(graph)
if nodeSizes is None:
print("get node size")
nodeSizes = graphs.getNodeSizes(graph)
if edgeWeights is None :
print("get edge weights")
edgeWeights = graphs.graphMap(graph,'edge')
edgeWeights[:]=0
if nodeFeatures is None :
print("get node feat")
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
if nodeLabels is None:
nodeLabels = graphs.graphMap(graph,'node',dtype='uint32')
#import sys
#print("graph refcout", sys.getrefcount(graph))
mg = graphs.mergeGraph(graph)
#print("graph refcout", sys.getrefcount(graph))
#mg = []
#del mg
#import gc
#gc.collect()
#print("graph refcout", sys.getrefcount(graph))
#sys.exit(0)
clusterOp = graphs.minEdgeWeightNodeDist(mg,edgeWeights=edgeWeights,edgeLengths=edgeLengths,
nodeFeatures=nodeFeatures,nodeSizes=nodeSizes,
nodeLabels=nodeLabels,
beta=float(beta),metric=metric,wardness=wardness)
hc = graphs.hierarchicalClustering(clusterOp, nodeNumStopCond=nodeNumStop,
buildMergeTreeEncoding=False)
hc.cluster()
labels = hc.resultLabels(out=out)
#del hc
#del clusterOp
#del mg
return labels
agglomerativeClustering.__module__ = 'vigra.graphs'
graphs.agglomerativeClustering = agglomerativeClustering
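# A minimal usage sketch for the clustering entry point registered above;
# 'rag' (a region adjacency graph) and 'edgeWeights' (a per-edge indicator)
# are hypothetical inputs and the parameter values are illustrative only:
#
#   labels = vigra.graphs.agglomerativeClustering(rag,
#                                                 edgeWeights=edgeWeights,
#                                                 nodeNumStop=500,
#                                                 beta=0.0, wardness=1.0)
#   # 'labels' is a node map assigning every node of 'rag' to a cluster.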
def minEdgeWeightNodeDist(mergeGraph,edgeWeights=None,edgeLengths=None,nodeFeatures=None,nodeSizes=None,
nodeLabels=None,outWeight=None,
beta=0.5,metric='squaredNorm',wardness=1.0, gamma=10000000.0):
graph=mergeGraph.graph()
assert edgeWeights is not None or nodeFeatures is not None
if edgeLengths is None :
edgeLengths = graphs.getEdgeLengths(graph,addChannelDim=True)
if nodeSizes is None:
nodeSizes = graphs.getNodeSizes(graph,addChannelDim=True)
if edgeWeights is None :
edgeWeights = graphs.graphMap(graph,'edge',addChannelDim=True)
edgeWeights[:]=0
if nodeFeatures is None :
nodeFeatures = graphs.graphMap(graph,'node',addChannelDim=True)
nodeFeatures[:]=0
if outWeight is None:
outWeight=graphs.graphMap(graph,item='edge',dtype=numpy.float32)
if nodeLabels is None :
nodeLabels = graphs.graphMap(graph,'node',dtype='uint32')
nodeLabels[:]=0
if metric=='squaredNorm':
nd=graphs.MetricType.squaredNorm
elif metric=='norm':
nd=graphs.MetricType.norm
elif metric=='chiSquared':
nd=graphs.MetricType.chiSquared
elif metric in ('l1','manhattan'):
nd=graphs.MetricType.manhattan
elif isinstance(metric,graphs.MetricType):
nd=metric
else :
raise RuntimeError("'%s' is not a supported distance type"%str(metric))
# call unsafe C++ function and make it safe
print("nodeLabels ",nodeLabels.shape, nodeLabels.dtype)
op = graphs.__minEdgeWeightNodeDistOperator(mergeGraph,edgeWeights,edgeLengths,nodeFeatures,nodeSizes,outWeight,nodeLabels,
float(beta),nd,float(wardness),float(gamma))
op.__base_object__=mergeGraph
op.__outWeightArray__=outWeight
op.edgeLengths=edgeLengths
op.nodeSizes=nodeSizes
op.edgeWeights=edgeWeights
op.nodeFeatures=nodeFeatures
return op
minEdgeWeightNodeDist.__module__ = 'vigra.graphs'
graphs.minEdgeWeightNodeDist = minEdgeWeightNodeDist
def pythonClusterOperator(mergeGraph,operator,useMergeNodeCallback=True,useMergeEdgesCallback=True,useEraseEdgeCallback=True):
# call unsafe function and make it safe
op = graphs.__pythonClusterOperator(mergeGraph,operator,useMergeNodeCallback,useMergeEdgesCallback,useEraseEdgeCallback)
#op.__dict__['__base_object__']=mergeGraph
#op.__base_object__=mergeGraph
return op
pythonClusterOperator.__module__ = 'vigra.graphs'
graphs.pythonClusterOperator = pythonClusterOperator
def hierarchicalClustering(clusterOperator,nodeNumStopCond,buildMergeTreeEncoding=True):
# call unsafe C++ function and make it safe
hc = graphs.__hierarchicalClustering(clusterOperator,int(nodeNumStopCond),bool(buildMergeTreeEncoding))
#hc.__dict__['__base_object__']=clusterOperator
hc.__base_object__ = clusterOperator
return hc
hierarchicalClustering.__module__ = 'vigra.graphs'
graphs.hierarchicalClustering = hierarchicalClustering
_genGraphSegmentationFunctions()
del _genGraphSegmentationFunctions
def _genHistogram():
def gaussianHistogram(image,minVals,maxVals,bins=30,
sigma=3.0,sigmaBin=2.0,out=None):
""" Compute a gaussian-smoothed histogram for every pixel of 'image' and
normalize it to sum to one along the bin axis.
"""
spatialDim = image.ndim - 1
out = histogram.gaussianHistogram_(image=image, minVals=minVals, maxVals=maxVals,
bins=bins, sigma=sigma, sigmaBin=sigmaBin,
out=out)
out = out.reshape(image.shape[0:spatialDim]+(-1,))
if spatialDim == 2:
out /= numpy.sum(out,axis=spatialDim)[:,:, numpy.newaxis]
elif spatialDim == 3:
out /= numpy.sum(out,axis=spatialDim)[:,:,:, numpy.newaxis]
elif spatialDim == 4:
out /= numpy.sum(out,axis=spatialDim)[:,:,:, :,numpy.newaxis]
return out
gaussianHistogram.__module__ = 'vigra.histogram'
histogram.gaussianHistogram = gaussianHistogram
def gaussianRankOrder(image, minVal=None, maxVal=None,
bins=20, sigmas=None, ranks=[0.1,0.25,0.5,0.75,0.9],
out=None):
# FIXME: crashes on Python3
image = numpy.require(image.squeeze(),dtype='float32')
nDim = image.ndim
if sigmas is None:
sigmas = (2.0,)*nDim + (float(bins)/10.0,)
ranks = numpy.require(ranks,dtype='float32')
sigmas = numpy.require(sigmas,dtype='float32')
assert len(sigmas) == image.ndim + 1
if minVal is None :
minVal = image.min()
if maxVal is None :
maxVal = image.max()
#print("image",image.shape,image.dtype)
#print("ranks",ranks.shape,ranks.dtype)
#print("sigmas",sigmas)
return histogram._gaussianRankOrder(image=image,
minVal=float(minVal),
maxVal=float(maxVal),
bins=int(bins),
sigmas=sigmas,ranks=ranks,
out=out)
gaussianRankOrder.__module__ = 'vigra.histogram'
histogram.gaussianRankOrder = gaussianRankOrder
_genHistogram()
del _genHistogram
def _genGraphSmoothingFunctions():
def recursiveGraphSmoothing( graph,nodeFeatures,edgeIndicator,gamma,
edgeThreshold,scale=1.0,iterations=1,out=None):
""" recursive graph smoothing to smooth node features.
Each node feature is smoothed with the features of neighbor nodes.
The strength of the smoothing is computed from:
"edgeIndicator > edgeThreshold ? 0 : exp(-1.0*gamma*edgeIndicator)*scale"
Therefore this filter is edge preserving.
Keyword Arguments :
- graph : input graph
- nodeFeatures : node features which should be smoothed
- edgeIndicator : edge indicator
- gamma : scale edgeIndicator by gamma before taking the negative exponent
- scale : how much should a node be mixed with its neighbours per iteration
- iterations : how often recursiveGraphSmoothing should be applied recursively
Returns :
smoothed nodeFeatures
"""
return graphs._recursiveGraphSmoothing(graph=graph,nodeFeatures=nodeFeatures,edgeIndicator=edgeIndicator,
gamma=gamma,edgeThreshold=edgeThreshold,scale=scale,iterations=iterations,out=out)
recursiveGraphSmoothing.__module__ = 'vigra.graphs'
graphs.recursiveGraphSmoothing = recursiveGraphSmoothing
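# A minimal usage sketch for the smoothing function registered above;
# 'rag', 'nodeFeat' (per-node feature vectors) and 'edgeInd' (a per-edge
# indicator) are hypothetical inputs and the parameter values are illustrative:
#
#   smoothed = vigra.graphs.recursiveGraphSmoothing(rag, nodeFeat, edgeInd,
#                                                   gamma=0.1,
#                                                   edgeThreshold=0.5,
#                                                   scale=1.0, iterations=10)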
_genGraphSmoothingFunctions()
del _genGraphSmoothingFunctions
def _genGraphMiscFunctions():
def nodeFeaturesToEdgeWeights(graph,nodeFeatures,metric='l1',out=None):
""" Compute an edge indicator from node features.
Keyword Arguments :
- graph : input graph
- nodeFeatures : node map with feature vector for each node
- metric : metric / distance used to convert 2 node features to
an edge weight
Returns :
edge indicator
"""
return graphs._nodeFeatureDistToEdgeWeight(graph=graph,nodeFeatures=nodeFeatures,metric=metric,out=out)
nodeFeaturesToEdgeWeights.__module__ = 'vigra.graphs'
graphs.nodeFeaturesToEdgeWeights = nodeFeaturesToEdgeWeights
_genGraphMiscFunctions()
del _genGraphMiscFunctions
def _genBlockwiseFunctions():
def makeTuple(val, ndim):
tvals = None
if isinstance(val, Number):
tvals = (float(val),)*ndim
else :
tvals = tuple(val)
if len(tvals) != ndim:
raise RuntimeError("sigma/innerScale/outerScale must be as long as ndim, or must be a scalar")
return tvals
def getConvolutionOptionsClass(ndim):
assert ndim >=2 and ndim <= 5
if ndim == 2 :
return blockwise.BlockwiseConvolutionOptions2D
elif ndim == 3 :
return blockwise.BlockwiseConvolutionOptions3D
elif ndim == 4 :
return blockwise.BlockwiseConvolutionOptions4D
elif ndim == 5 :
return blockwise.BlockwiseConvolutionOptions5D
def convolutionOptions(blockShape, sigma=None,innerScale=None, outerScale=None, numThreads = cpu_count()):
ndim = len(blockShape)
options = getConvolutionOptionsClass(ndim)()
options.blockShape = blockShape
options.numThreads = numThreads
if sigma is not None:
sigma = makeTuple(sigma,ndim)
options.stdDev = sigma
if innerScale is not None:
options.innerScale = makeTuple(innerScale,ndim)
if outerScale is not None:
options.outerScale = makeTuple(outerScale,ndim)
return options
convolutionOptions.__module__ = 'vigra.blockwise'
blockwise.convolutionOptions = convolutionOptions
blockwise.convOpts = convolutionOptions
def gaussianSmooth(image,options,out=None):
out = blockwise._gaussianSmooth(image,options,out)
return out
gaussianSmooth.__module__ = 'vigra.blockwise'
blockwise.gaussianSmooth = gaussianSmooth
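# A minimal usage sketch for the blockwise API wired up above; 'volume' is a
# hypothetical 3D float32 array and the block shape and sigma are illustrative:
#
#   opts = vigra.blockwise.convolutionOptions(blockShape=(64, 64, 64),
#                                             sigma=2.0, numThreads=4)
#   smoothed = vigra.blockwise.gaussianSmooth(volume, opts)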
def gaussianGradient(image,options,out=None):
out = blockwise._gaussianGradient(image,options,out)
return out
gaussianGradient.__module__ = 'vigra.blockwise'
blockwise.gaussianGradient = gaussianGradient
def gaussianGradientMagnitude(image,options,out=None):
out = blockwise._gaussianGradientMagnitude(image,options,out)
return out
gaussianGradientMagnitude.__module__ = 'vigra.blockwise'
blockwise.gaussianGradientMagnitude = gaussianGradientMagnitude
def hessianOfGaussianEigenvalues(image,options,out=None):
out = blockwise._hessianOfGaussianEigenvalues(image,options,out)
return out
hessianOfGaussianEigenvalues.__module__ = 'vigra.blockwise'
blockwise.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
def hessianOfGaussianFirstEigenvalue(image,options,out=None):
out = blockwise._hessianOfGaussianFirstEigenvalue(image,options,out)
return out
hessianOfGaussianFirstEigenvalue.__module__ = 'vigra.blockwise'
blockwise.hessianOfGaussianFirstEigenvalue = hessianOfGaussianFirstEigenvalue
def hessianOfGaussianLastEigenvalue(image,options,out=None):
out = blockwise._hessianOfGaussianLastEigenvalue(image,options,out)
return out
hessianOfGaussianLastEigenvalue.__module__ = 'vigra.blockwise'
blockwise.hessianOfGaussianLastEigenvalue = hessianOfGaussianLastEigenvalue
_genBlockwiseFunctions()
del _genBlockwiseFunctions
def loadBSDGt(filename):
import scipy.io as sio
matContents = sio.loadmat(filename)
ngt = len(matContents['groundTruth'][0])
gts = []
for gti in range(ngt):
gt = matContents['groundTruth'][0][gti][0]['Segmentation'][0]
gt = numpy.swapaxes(gt,0,1)
gt = gt.astype(numpy.uint32)
print(gt.min(),gt.max())
gts.append(gt[:,:,None])
gtArray = numpy.concatenate(gts,axis=2)
print(gtArray.shape)
return gtArray
def pmapSeeds(pmap):
pass
| mit |
studywolf/control | studywolf_control/sim_and_plot.py | 1 | 7267 | '''
Copyright (C) 2014 Terry Stewart and Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
class Runner:
"""
A class for drawing the arm simulation.
NOTE: If you're getting an error along the lines of
'xrange is not an iterator', make sure that you have
the most recent version of matplotlib, from their github.
"""
def __init__(self, title='', dt=1e-3, control_steps=1,
display_steps=1, t_target=1.0,
control_type='', trajectory=None,
infinite_trail=False, mouse_control=False):
self.dt = dt
self.control_steps = control_steps
self.display_steps = display_steps
self.target_steps = int(t_target/float(dt*display_steps))
self.trajectory = trajectory
self.control_type = control_type
self.infinite_trail = infinite_trail
self.mouse_control = mouse_control
self.title = title
self.sim_step = 0
self.trail_index = 0
self.tau = None
def run(self, arm, control_shell, end_time=None):
self.end_time = end_time
self.arm = arm
if arm.DOF == 1:
box = [-1, 1, -.25, 1.5]
elif arm.DOF == 2:
box = [-.5, .5, -.25, .75]
elif arm.DOF == 3:
box = [-2, 2, -.5, 4]
self.shell = control_shell
fig = plt.figure(figsize=(5.1,5.1), dpi=None)
fig.suptitle(self.title);
# set the padding of the subplot explicitly
fig.subplotpars.left=.1; fig.subplotpars.right=.9
fig.subplotpars.bottom=.1; fig.subplotpars.top=.9
ax = fig.add_subplot(1, 1, 1,
xlim=(box[0], box[1]),
ylim=(box[2], box[3]))
ax.xaxis.grid(); ax.yaxis.grid()
# make it a square plot
ax.set_aspect(1)
# set up plot elements
self.trail, = ax.plot([], [], color='#888888', lw=3)
self.arm_line, = ax.plot([], [], 'o-', mew=4, color='b', lw=5)
self.target_line, = ax.plot([], [], 'r-x', mew=4)
self.info = ax.text(box[0]+abs(.1*box[0]), \
box[3]-abs(.1*box[3]), \
'', va='top')
self.trail_data = np.ones((self.target_steps, 2), \
dtype='float') * np.NAN
if self.trajectory is not None:
ax.plot(self.trajectory[:,0], self.trajectory[:,1], alpha=.3)
# connect up mouse event if specified
if self.mouse_control:
self.target = self.shell.controller.gen_target(arm)
# get pixel width of fig (-.2 for the padding)
self.fig_width = (fig.get_figwidth() - .2 \
* fig.get_figwidth()) * fig.get_dpi()
def move_target(event):
# get mouse position and scale appropriately to convert to (x,y)
if event.xdata is not None:
self.target = np.array([event.xdata, event.ydata])
# set target for the controller
self.shell.controller.target = self.target
# hook up function to mouse event
fig.canvas.mpl_connect('motion_notify_event', move_target)
frames = 50
anim = animation.FuncAnimation(fig, self.anim_animate,
init_func=self.anim_init, frames=5000, interval=0, blit=True)
self.anim = anim
def make_info_text(self):
text = []
text.append('t = %1.4g'%(self.sim_step*self.dt))
q_text = ' '.join('%4.3f,'%F for F in self.arm.q)
text.append('q = ['+q_text+']')
u_text = ' '.join('%4.3f,'%F for F in self.shell.u)
text.append('u = ['+u_text+']')
return '\n'.join(text)
def anim_init(self):
self.info.set_text('')
self.arm_line.set_data([], [])
self.target_line.set_data([], [])
self.trail.set_data([], [])
return self.arm_line, self.target_line, self.info, self.trail
def anim_animate(self, i):
if self.end_time is not None:
# check for run time
if (self.sim_step * self.dt) > self.end_time:
self.anim._stop()
plt.close()
if self.control_type == 'random':
# update target after specified period of time passes
if self.sim_step % (self.target_steps*self.display_steps) == 0:
self.target = self.shell.controller.gen_target(self.arm)
else:
self.target = self.shell.controller.target
# before drawing
for j in range(self.display_steps):
# update control signal
if (self.sim_step % self.control_steps) == 0 or self.tau is None:
self.tau = self.shell.control(self.arm)
# apply control signal and simulate
self.arm.apply_torque(u=self.tau, dt=self.dt)
self.sim_step +=1
# update figure
self.arm_line.set_data(*self.arm.position())
self.info.set_text(self.make_info_text())
self.trail.set_data(self.trail_data[:,0], self.trail_data[:,1])
if self.target is not None:
target = self.target
self.target_line.set_data(target)
# update hand trail
if self.shell.pen_down:
if self.infinite_trail:
# if we're writing, keep all pen_down history
self.trail_index += 1
# if we've hit the end of the trail, double it and copy
if self.trail_index >= self.trail_data.shape[0]-1:
trail_data = np.zeros((self.trail_data.shape[0]*2,
self.trail_data.shape[1]))*np.nan
trail_data[:self.trail_index+1] = self.trail_data
self.trail_data = trail_data
self.trail_data[self.trail_index] = \
self.arm_line.get_xydata()[-1]
else:
# else just use a buffer window
self.trail_data[:-1] = self.trail_data[1:]
self.trail_data[-1] = self.arm_line.get_xydata()[-1]
else:
# if pen up add a break in the trail
self.trail_data[self.trail_index] = [np.nan, np.nan]
return self.target_line, self.info, self.trail, self.arm_line
def show(self):
try:
plt.show()
except AttributeError:
pass
| gpl-3.0 |
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/TestCode/Tensorflow/object_detection/APP-objdettf1.py | 1 | 7677 | # coding: utf-8
# # Object Detection Demo
# Welcome to the object detection inference walkthrough!
# This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an
# image.
# Make sure to follow the [installation instructions]
# (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start.
# # Imports
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from threading import Thread
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=(), daemon=True).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stream.release()
self.stopped = True
# ## Env setup
# This is needed to display the images.
# get_ipython().run_line_magic('matplotlib', 'inline')
# This is needed since the notebook is stored in the object_detection folder.
# sys.path.append("..")
# ## Object detection imports
# Here are the imports from the object detection module.
from BETA.TestCode.Tensorflow.object_detection.utils import label_map_util
from BETA.TestCode.Tensorflow.object_detection.utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT`
# to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo]
# (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md)
# for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
# MODEL_FILE = MODEL_NAME + '.tar.gz'
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of strings used to add the correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
## Download Model
# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this
# corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping
# integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code - converts image into numpy array. This is done by OpenCV by default
# In[ ]:
# def load_image_into_numpy_array(image):
# (im_width, im_height) = image.size
# return np.array(image.getdata()).reshape(
# (im_height, im_width, 3)).astype(np.uint8)
# # Detection
# In[ ]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3)]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# In[ ]:
# cap = cv2.VideoCapture(0)
cap = WebcamVideoStream(src=0).start()
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the detected objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# for image_path in TEST_IMAGE_PATHS:
frames = 0
while 1:
# image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
# image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# _, image_np = cap.read()
image_np = cap.read()
# if image_np in cap.read():
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
# plt.figure(figsize=IMAGE_SIZE)
# plt.imshow(image_np)
cv2.imshow('image', image_np)
if cv2.waitKey(1) & 0xFF == ord('q'):
# cap.release()
cap.stop()
cv2.destroyAllWindows()
break
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/transforms.py | 2 | 88425 | """
matplotlib includes a framework for arbitrary geometric
transformations that is used to determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
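As a minimal sketch of this composition (using the :class:`Affine2D` helper
defined later in this module), two affine transforms can be chained with
``+`` and applied to an (N, 2) array of points::

    from matplotlib.transforms import Affine2D

    scale = Affine2D().scale(2.0)           # scale x and y by 2
    shift = Affine2D().translate(1.0, 0.0)  # then shift along x
    combined = scale + shift                # 'scale' is applied first
    combined.transform([[1.0, 1.0]])        # -> [[3.0, 2.0]]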
"""
from __future__ import print_function, division
import numpy as np
from numpy import ma
from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
update_path_extents)
from numpy.linalg import inv
from weakref import WeakValueDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
from path import Path
DEBUG = False
MaskedArray = ma.MaskedArray
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
# INVALID_AFFINE_ONLY
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
pass_through = False
"""
If pass_through is True, all ancestors will always be
invalidated, even if 'self' is already invalid.
"""
def __init__(self, shorthand_name=None):
"""
Creates a new :class:`TransformNode`.
**shorthand_name** - a string representing the "name" of this
transform. The name carries no significance
other than to improve the readability of
``str(transform)`` when DEBUG=True.
"""
# Parents are stored in a WeakValueDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakValueDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
self._shorthand_name = shorthand_name or ''
if DEBUG:
def __str__(self):
# either just return the name of this TransformNode, or it's repr
return self._shorthand_name or repr(self)
def __getstate__(self):
d = self.__dict__.copy()
# turn the weakkey dictionary into a normal dictionary
d['_parents'] = dict(self._parents.iteritems())
return d
def __setstate__(self, data_dict):
self.__dict__ = data_dict
# turn the normal dictionary back into a WeakValueDictionary
self._parents = WeakValueDictionary(self._parents)
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and triggers an
invalidation of its ancestors. Should be called any
time the transform changes.
"""
value = self.INVALID
if self.is_affine:
value = self.INVALID_AFFINE
return self._invalidate_internal(value, invalidating_node=self)
def _invalidate_internal(self, value, invalidating_node):
"""
Called by :meth:`invalidate` and subsequently ascends the transform
stack calling each TransformNode's _invalidate_internal method.
"""
# determine if this call will be an extension to the invalidation
# status. If not, then a shortcut means that we needn't invoke an
# invalidation up the transform stack as it will already have been
# invalidated.
# N.B. This makes the invalidation sticky: once a transform has been
# invalidated as NON_AFFINE, it will always be invalidated as
# NON_AFFINE, even when triggered with an AFFINE_ONLY invalidation.
# In most cases this is not a problem (i.e. for interactive panning and
# zooming) and the only side effect will be on performance.
status_changed = self._invalid < value
if self.pass_through or status_changed:
self._invalid = value
for parent in self._parents.values():
parent._invalidate_internal(value=value,
invalidating_node=self)
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[id(self)] = self
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
Once the "dot" file has been created, it can be turned into a
png easily with::
$> dot -Tpng -o $OUTPUT_FILE $DOT_FILE
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val)
for key, val
in props.iteritems()])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in root.__dict__.iteritems():
if val is child:
name = key
break
fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
% (hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1, 0] - points[0, 0] == 0 or
points[1, 1] - points[0, 1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.frozen.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates
that define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates
that define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
bottom-left corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
top-right corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
:attr:`y1`).""")
def get_points(self):
raise NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y > y0 and y < y1))
or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0, 0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container=None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
- 'SE' for bottom-right
- 'E' for right
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, basestring):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w - W)) - L,
(b + cy * (h - H)) - B])
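# A short illustrative sketch of :meth:`anchored` (hypothetical values):
# centering a unit box inside a 4x2 container,
#
#   Bbox([[0, 0], [1, 1]]).anchored('C', container=Bbox([[0, 0], [4, 2]]))
#
# yields ``Bbox([[1.5, 0.5], [2.5, 1.5]])``.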
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
assert box_aspect > 0 and fig_aspect > 0
if container is None:
container = self
w, h = container.size
H = w * box_aspect / fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect / box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self.get_points()
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
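# A short illustrative sketch of :meth:`union` (hypothetical values):
#
#   b1 = Bbox.from_extents(0, 0, 1, 1)
#   b2 = Bbox.from_extents(0.5, 0.5, 2, 2)
#   Bbox.union([b1, b2])   # -> Bbox([[0., 0.], [2., 2.]])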
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points, **kwargs):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self, **kwargs)
self._points = np.asarray(points, np.float_)
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
# it is helpful in some contexts to know if the bbox is a
# default or has been mutated; we store the orig points to
# support the mutated methods
self._points_orig = self._points.copy()
if DEBUG:
___init__ = __init__
def __init__(self, points, **kwargs):
self._check(points)
self.___init__(points, **kwargs)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
_unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(Bbox._unit_values.copy())
@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
def __repr__(self):
return 'Bbox(%r)' % repr(self._points)
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with "
"update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:, 0] = points[:, 0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:, 1] = points[:, 1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l + w, b + h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
def mutated(self):
'return whether the bbox has changed since init'
return self.mutatedx() or self.mutatedy()
def mutatedx(self):
'return whether the x-limits have changed since init'
return (self._points[0, 0] != self._points_orig[0, 0] or
self._points[1, 0] != self._points_orig[1, 0])
def mutatedy(self):
'return whether the y-limits have changed since init'
return (self._points[0, 1] != self._points_orig[0, 1] or
self._points[1, 1] != self._points_orig[1, 1])
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform, **kwargs):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
assert bbox.is_bbox
assert isinstance(transform, Transform)
assert transform.input_dims == 2
assert transform.output_dims == 2
BboxBase.__init__(self, **kwargs)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%r, %r)" % (self._bbox, self._transform)
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
points = np.ma.filled(points, 0.0)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :attr:`has_inverse` is True)
If the transform needs to do something non-standard with
:class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
input_dims = None
"""
The number of input dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
output_dims = None
"""
The number of output dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
has_inverse = False
"""True if this transform has a corresponding inverse transform."""
is_separable = False
"""True if this transform is separable in the x- and y- dimensions."""
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __eq__(self, other):
# equality is based on transform object id. Hence:
# Transform() != Transform().
# Some classes, such as TransformWrapper & AffineBase, will override.
return self is other
def _iter_break_from_left_to_right(self):
"""
Returns an iterator breaking down this transform stack from left to
right recursively. If self == ((A, N), A) then the result will be an
iterator which yields I : ((A, N), A), followed by A : (N, A),
followed by (A, N) : (A), but not ((A, N), A) : I.
This is equivalent to flattening the stack then yielding
``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
"""
yield IdentityTransform(), self
@property
def depth(self):
"""
Returns the number of transforms which have been chained
together to form this Transform instance.
.. note::
For the special case of a Composite transform, the maximum depth
of the two is returned.
"""
return 1
def contains_branch(self, other):
"""
Return whether the given transform is a sub-tree of this transform.
This routine uses transform equality to identify sub-trees, therefore
in many situations it is object id which will be used.
For the case where the given transform represents the whole
of this transform, returns True.
"""
if self.depth < other.depth:
return False
# check that a subtree is equal to other (starting from self)
for _, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return True
return False
def contains_branch_seperately(self, other_transform):
"""
Returns whether the given branch is a sub-tree of this transform on
        each separate dimension.
A common use for this method is to identify if a transform is a blended
transform containing an axes' data transform. e.g.::
x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
"""
if self.output_dims != 2:
raise ValueError('contains_branch_seperately only supports '
'transforms with 2 output dimensions')
        # for a non-blended transform each separate dimension is the same, so
# just return the appropriate shape.
return [self.contains_branch(other_transform)] * 2
def __sub__(self, other):
"""
Returns a transform stack which goes all the way down self's transform
stack, and then ascends back up other's stack. If it can, this is
optimised::
# normally
            A - B == A + B.inverted()
# sometimes, when A contains the tree B there is no need to
# descend all the way down to the base of A (via B), instead we
# can just stop at B.
            (A + B) - B == A
            # similarly, when B contains tree A, we can avoid descending A at
# all, basically:
A - (A + B) == ((B + A) - A).inverted() or B^-1
For clarity, the result of ``(A + B) - B + B == (A + B)``.
"""
# we only know how to do this operation if other is a Transform.
if not isinstance(other, Transform):
return NotImplemented
for remainder, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return remainder
for remainder, sub_tree in other._iter_break_from_left_to_right():
if sub_tree == self:
if not remainder.has_inverse:
                    raise ValueError("The shortcut cannot be computed since "
                                     "other's transform includes a non-invertible component.")
return remainder.inverted()
# if we have got this far, then there was no shortcut possible
if other.has_inverse:
return self + other.inverted()
else:
raise ValueError('It is not possible to compute transA - transB '
'since transB cannot be inverted and there is no '
'shortcut possible.')
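    # Usage sketch (illustrative, not from the upstream source): the ``+``
    # and ``-`` operators build and partially unwind transform stacks.  Kept
    # as a comment so module behaviour is unchanged; it relies only on the
    # Affine2D class defined later in this module::
    #
    #     a = Affine2D().scale(2.0)
    #     b = Affine2D().translate(1.0, 0.0)
    #     stack = a + b        # apply ``a`` first, then ``b``
    #     partial = stack - b  # the shortcut stops at ``b`` and returns ``a``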
def __array__(self, *args, **kwargs):
"""
Array interface to get at this Transform's affine matrix.
"""
return self.get_affine().get_matrix()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.transform_affine(self.transform_non_affine(values))
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.get_affine().transform(values)
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return values
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def get_matrix(self):
"""
Get the Affine transformation array for the affine part
of this transform.
"""
return self.get_affine().get_matrix()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
assert len(point) == self.input_dims
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return self.transform_path_affine(self.transform_path_non_affine(path))
def transform_path_affine(self, path):
"""
Returns a path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return self.get_affine().transform_path_affine(path)
def transform_path_non_affine(self, path):
"""
Returns a path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return Path(self.transform_non_affine(path.vertices), path.codes,
path._interpolation_steps)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
# pts must be array with 2 columns for x,y
assert pts.shape[1] == 2
# angles must be a column vector and have same number of
# rows as pts
assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]
# Transform both sets of points
tpts = self.transform(pts)
tpts2 = self.transform(pts2)
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2(d[:, 1], d[:, 0])
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
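    # Usage sketch (illustrative, not from the upstream source): transform a
    # single angle anchored at a point through a 90 degree rotation.  Kept as
    # a comment; it assumes the Affine2D class defined later in this module::
    #
    #     import numpy as np
    #     rot = Affine2D().rotate_deg(90.0)
    #     angles = np.array([0.0])           # pointing along +x, in degrees
    #     pts = np.array([[1.0, 2.0]])
    #     rot.transform_angles(angles, pts)  # approximately array([90.0])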
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
def __init__(self, child):
"""
*child*: A class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
assert isinstance(child, Transform)
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __eq__(self, other):
return self._child.__eq__(other)
if DEBUG:
def __str__(self):
return str(self._child)
def __getstate__(self):
# only store the child
return {'child': self._child}
def __setstate__(self, state):
# re-initialise the TransformWrapper with the state's child
self.__init__(state['child'])
def __repr__(self):
return "TransformWrapper(%r)" % self._child
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.set_children(child)
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
self.get_matrix = child.get_matrix
# note we do not wrap other properties here since the transform's
# child can be changed with WrappedTransform.set and so checking
# is_affine and other such properties may be dangerous.
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
assert child.input_dims == self.input_dims
assert child.output_dims == self.output_dims
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_affine(self):
return self._child.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
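# Usage sketch (illustrative, not from the upstream source): a
# TransformWrapper lets one node of a transform tree be replaced at run time
# while everything composed on top of it stays subscribed.  Kept as a
# comment::
#
#     wrapper = TransformWrapper(Affine2D().scale(2.0))
#     composed = wrapper + Affine2D().translate(0.0, 1.0)
#     wrapper.set(Affine2D().rotate_deg(45.0))  # ``composed`` now sees the rotation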
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self, *args, **kwargs):
Transform.__init__(self, *args, **kwargs)
self._inverted = None
def __array__(self, *args, **kwargs):
# optimises the access of the transform matrix vs the superclass
return self.get_matrix()
@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
def __eq__(self, other):
if other.is_affine:
return np.all(self.get_matrix() == other.get_matrix())
return NotImplemented
def transform(self, values):
return self.transform_affine(values)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, values):
raise NotImplementedError('Affine subclasses should override this '
'method.')
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self.transform_path_affine(path)
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return Path(self.transform_affine(path.vertices),
path.codes, path._interpolation_steps)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
has_inverse = True
input_dims = 2
output_dims = 2
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
def transform_affine(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform(point, mtx)
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform_affine = transform_affine
def transform_affine(self, points):
            # The major speed trap here is just converting the points to an
            # array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
% type(points))
return self._transform_affine(points)
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
shorthand_name = None
if self._shorthand_name:
shorthand_name = '(%s)-1' % self._shorthand_name
self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix=None, **kwargs):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self, **kwargs)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
# def __cmp__(self, other):
# # XXX redundant. this only tells us eq.
# if (isinstance(other, Affine2D) and
# (self.get_matrix() == other.get_matrix()).all()):
# return 0
# return -1
@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
.
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3, 3)))
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
assert isinstance(other, Affine2DBase)
self._mtx = other.get_matrix()
self.invalidate()
@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees * np.pi / 180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
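# Usage sketch (illustrative, not from the upstream source): the mutating
# helpers on Affine2D all return ``self``, so a transform can be built as a
# chain.  Kept as a comment::
#
#     trans = Affine2D().rotate_deg(30.0).translate(1.0, 0.5).scale(2.0)
#     trans.transform_point((0.0, 0.0))  # the origin after the full chain
#     trans.to_values()                  # (a, b, c, d, e, f) of the 3x3 matrix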
class IdentityTransform(Affine2DBase):
"""
    A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return points
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedAffine2D.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
# Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
@property
def depth(self):
return max([self._x.depth, self._y.depth])
def contains_branch(self, other):
# a blended transform cannot possibly contain a branch from two different transforms.
return False
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def _get_has_inverse(self):
return self._x.has_inverse and self._y.has_inverse
has_inverse = property(_get_has_inverse)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
x = self._x
y = self._y
if x == y and x.input_dims == 2:
return x.transform_non_affine(points)
if x.input_dims == 2:
x_points = x.transform_non_affine(points)[:, 0:1]
else:
x_points = x.transform_non_affine(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform_non_affine(points)[:, 1:]
else:
y_points = y.transform_non_affine(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
assert x_transform.is_affine
assert y_transform.is_affine
assert x_transform.is_separable
assert y_transform.is_separable
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedGenericTransform.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
# Note, this is an exact copy of BlendedTransform.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
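# Usage sketch (illustrative, not from the upstream source): a blended
# transform takes x from one child and y from the other.  The common recipe
# of placing text at a data x position but a fixed axes-fraction height
# would look roughly like this (``ax`` is a hypothetical Axes object)::
#
#     trans = blended_transform_factory(ax.transData, ax.transAxes)
#     ax.text(1.5, 0.95, 'peak', transform=trans)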
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _invalidate_internal(self, value, invalidating_node):
# In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs
# to be extended to invalidate the NON_AFFINE part too. These cases are when the right
# hand transform is non-affine and either:
# (a) the left hand transform is non affine
# (b) it is the left hand node which has triggered the invalidation
if value == Transform.INVALID_AFFINE \
and not self._b.is_affine \
and (not self._a.is_affine or invalidating_node is self._a):
value = Transform.INVALID
Transform._invalidate_internal(self, value=value,
invalidating_node=invalidating_node)
def __eq__(self, other):
if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
return self is other or (self._a == other._a and self._b == other._b)
else:
return False
def _iter_break_from_left_to_right(self):
        for lh_complement, rh_complement in self._a._iter_break_from_left_to_right():
            yield lh_complement, rh_complement + self._b
        for lh_complement, rh_complement in self._b._iter_break_from_left_to_right():
            yield self._a + lh_complement, rh_complement
@property
def depth(self):
return self._a.depth + self._b.depth
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
def __repr__(self):
return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_non_affine(points)
else:
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_path_non_affine(path)
else:
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if not self._b.is_affine:
return self._b.get_affine()
else:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def _get_has_inverse(self):
return self._a.has_inverse and self._b.has_inverse
has_inverse = property(_get_has_inverse)
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
assert a.is_affine
assert b.is_affine
Affine2DBase.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
@property
def depth(self):
return self._a.depth + self._b.depth
def _iter_break_from_left_to_right(self):
        for lh_complement, rh_complement in self._a._iter_break_from_left_to_right():
            yield lh_complement, rh_complement + self._b
        for lh_complement, rh_complement in self._b._iter_break_from_left_to_right():
            yield self._a + lh_complement, rh_complement
def __repr__(self):
return "CompositeAffine2D(%r, %r)" % (self._a, self._b)
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
    Shortcut versions of the composite transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
# check to see if any of a or b are IdentityTransforms. We use
# isinstance here to guarantee that the transforms will *always*
# be IdentityTransforms. Since TransformWrappers are mutable,
# use of equality here would be wrong.
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, Affine2D) and isinstance(b, Affine2D):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
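# Usage sketch (illustrative, not from the upstream source): this factory is
# what the ``+`` operator calls; identity children are collapsed and a pair
# of affines is fused into a single CompositeAffine2D.  Kept as a comment::
#
#     c = composite_transform_factory(Affine2D().scale(2.0), IdentityTransform())
#     # ``c`` is the Affine2D itself, because the right-hand side is identity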
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout, **kwargs):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
assert boxin.is_bbox
assert boxout.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%r, %r)" % (self._boxin, self._boxout)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout, **kwargs):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
assert boxout.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformToMaxOnly(BboxTransformTo):
"""
    :class:`BboxTransformToMaxOnly` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox` with a fixed upper left of (0, 0).
"""
def __repr__(self):
return "BboxTransformToMaxOnly(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
xmax, ymax = self._boxout.max
if DEBUG and (xmax == 0 or ymax == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[xmax, 0.0, 0.0],
[ 0.0, ymax, 0.0],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin, **kwargs):
assert boxin.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%r)" % (self._boxin)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans, **kwargs):
Affine2DBase.__init__(self, **kwargs)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%r)" % (self._t,)
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
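# Usage sketch (illustrative, not from the upstream source): ScaledTranslation
# is typically used to offset an artist by a physical distance such as
# points.  ``fig`` and ``ax`` are hypothetical Figure/Axes objects; kept as a
# comment::
#
#     dx, dy = 5 / 72., -5 / 72.  # 5 points right, 5 points down
#     offset = ScaledTranslation(dx, dy, fig.dpi_scale_trans)
#     shadow_transform = ax.transData + offset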
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
.. note::
Paths are considered immutable by this class. Any update to the
path's vertices/codes will not trigger a transform recomputation.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
assert isinstance(transform, Transform)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
# only recompute if the invalidation includes the non_affine part of the transform
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path(self._transform.transform_non_affine(self._path.vertices),
None, self._path._interpolation_steps)
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
self._revalidate()
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
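# Usage sketch (illustrative, not from the upstream source): a TransformedPath
# caches the expensive non-affine part of a path transform; the cheap affine
# remainder is applied separately at draw time.  Kept as a comment::
#
#     square = Path.unit_rectangle()
#     tpath = TransformedPath(square, Affine2D().scale(3.0))
#     path, affine = tpath.get_transformed_path_and_affine()
#     full = tpath.get_fully_transformed_path()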
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Modify the endpoints of a range as needed to avoid singularities.
*vmin*, *vmax*
the initial endpoints.
*tiny*
threshold for the ratio of the interval to the maximum absolute
value of its endpoints. If the interval is smaller than
this, it will be expanded. This value should be around
1e-15 or larger; otherwise the interval will be approaching
the double precision resolution limit.
*expander*
fractional amount by which *vmin* and *vmax* are expanded if
the original interval is too small, based on *tiny*.
*increasing*: [True | False]
If True (default), swap *vmin*, *vmax* if *vmin* > *vmax*
Returns *vmin*, *vmax*, expanded and/or swapped if necessary.
If either input is inf or NaN, or if both inputs are 0,
returns -*expander*, *expander*.
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
if vmax == 0 and vmin == 0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
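# Usage sketch (illustrative, not from the upstream source): ``nonsingular``
# is typically used to sanitise axis limits before handing them to a locator.
# Kept as a comment::
#
#     nonsingular(3.0, 3.0)           # degenerate interval -> (2.997, 3.003)
#     nonsingular(1.0, -1.0)          # swapped endpoints   -> (-1.0, 1.0)
#     nonsingular(float('nan'), 1.0)  # non-finite input    -> (-0.001, 0.001)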
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
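# Usage sketch (illustrative, not from the upstream source): ``offset_copy``
# wraps the ScaledTranslation recipe above.  ``fig`` and ``ax`` are
# hypothetical Figure/Axes objects; kept as a comment::
#
#     trans = offset_copy(ax.transData, fig=fig, x=0.05, y=0.10, units='inches')
#     ax.text(2.0, 3.0, 'label', transform=trans)  # drawn offset from (2, 3)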
| mit |
eg-zhang/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
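# Usage sketch (illustrative, not part of the original tests): the helper
# above exercises the same pattern a user would follow directly with
# MinCovDet; roughly::
#
#     rng = np.random.RandomState(0)
#     clean = rng.randn(200, 5)
#     mcd = MinCovDet(random_state=rng).fit(clean)
#     mcd.location_, mcd.covariance_, mcd.support_.sum()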
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
smharper/openmc | openmc/tallies.py | 2 | 123554 | from collections.abc import Iterable, MutableSequence
import copy
from functools import partial, reduce
from itertools import product
from numbers import Integral, Real
import operator
from pathlib import Path
from xml.etree import ElementTree as ET
import h5py
import numpy as np
import pandas as pd
import scipy.sparse as sps
import openmc
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# The tally arithmetic product types. The tensor product performs the full
# cross product of the data in two tallies with respect to a specified axis
# (filters, nuclides, or scores). The entrywise product performs the arithmetic
# operation entrywise across the entries in two tallies with respect to a
# specified axis.
_PRODUCT_TYPES = ['tensor', 'entrywise']
# The following indicate acceptable types when setting Tally.scores,
# Tally.nuclides, and Tally.filters
_SCORE_CLASSES = (str, openmc.CrossScore, openmc.AggregateScore)
_NUCLIDE_CLASSES = (str, openmc.CrossNuclide, openmc.AggregateNuclide)
_FILTER_CLASSES = (openmc.Filter, openmc.CrossFilter, openmc.AggregateFilter)
# Valid types of estimators
ESTIMATOR_TYPES = ['tracklength', 'collision', 'analog']
class Tally(IDManagerMixin):
"""A tally defined by a set of scores that are accumulated for a list of
nuclides given a set of filters.
Parameters
----------
tally_id : int, optional
Unique identifier for the tally. If none is specified, an identifier
will automatically be assigned
name : str, optional
Name of the tally. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the tally
name : str
Name of the tally
filters : list of openmc.Filter
List of specified filters for the tally
nuclides : list of openmc.Nuclide
List of nuclides to score results for
scores : list of str
List of defined scores, e.g. 'flux', 'fission', etc.
estimator : {'analog', 'tracklength', 'collision'}
Type of estimator for the tally
triggers : list of openmc.Trigger
List of tally triggers
num_scores : int
Total number of scores
num_filter_bins : int
Total number of filter bins accounting for all filters
num_bins : int
Total number of bins for the tally
shape : 3-tuple of int
The shape of the tally data array ordered as the number of filter bins,
nuclide bins and score bins
filter_strides : list of int
Stride in memory for each filter
num_realizations : int
Total number of realizations
with_summary : bool
Whether or not a Summary has been linked
sum : numpy.ndarray
An array containing the sum of each independent realization for each bin
sum_sq : numpy.ndarray
An array containing the sum of each independent realization squared for
each bin
mean : numpy.ndarray
An array containing the sample mean for each bin
std_dev : numpy.ndarray
An array containing the sample standard deviation for each bin
derived : bool
Whether or not the tally is derived from one or more other tallies
sparse : bool
Whether or not the tally uses SciPy's LIL sparse matrix format for
compressed data storage
derivative : openmc.TallyDerivative
A material perturbation derivative to apply to all scores in the tally.
"""
next_id = 1
used_ids = set()
def __init__(self, tally_id=None, name=''):
# Initialize Tally class attributes
self.id = tally_id
self.name = name
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters')
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides')
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores')
self._estimator = None
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers')
self._derivative = None
self._num_realizations = 0
self._with_summary = False
self._sum = None
self._sum_sq = None
self._mean = None
self._std_dev = None
self._with_batch_statistics = False
self._derived = False
self._sparse = False
self._sp_filename = None
self._results_read = False
def __repr__(self):
parts = ['Tally']
parts.append('{: <15}=\t{}'.format('ID', self.id))
parts.append('{: <15}=\t{}'.format('Name', self.name))
if self.derivative is not None:
parts.append('{: <15}=\t{}'.format('Derivative ID', self.derivative.id))
filters = ', '.join(type(f).__name__ for f in self.filters)
parts.append('{: <15}=\t{}'.format('Filters', filters))
nuclides = ' '.join(str(nuclide) for nuclide in self.nuclides)
parts.append('{: <15}=\t{}'.format('Nuclides', nuclides))
parts.append('{: <15}=\t{}'.format('Scores', self.scores))
parts.append('{: <15}=\t{}'.format('Estimator', self.estimator))
return '\n\t'.join(parts)
@property
def name(self):
return self._name
@property
def filters(self):
return self._filters
@property
def nuclides(self):
return self._nuclides
@property
def num_nuclides(self):
return len(self._nuclides)
@property
def scores(self):
return self._scores
@property
def num_scores(self):
return len(self._scores)
@property
def num_filters(self):
return len(self.filters)
@property
def num_filter_bins(self):
return reduce(operator.mul, (f.num_bins for f in self.filters), 1)
@property
def num_bins(self):
return self.num_filter_bins * self.num_nuclides * self.num_scores
@property
def shape(self):
return (self.num_filter_bins, self.num_nuclides, self.num_scores)
@property
def estimator(self):
return self._estimator
@property
def triggers(self):
return self._triggers
@property
def num_realizations(self):
return self._num_realizations
@property
def with_summary(self):
return self._with_summary
def _read_results(self):
if self._results_read:
return
# Open the HDF5 statepoint file
with h5py.File(self._sp_filename, 'r') as f:
# Extract Tally data from the file
data = f['tallies/tally {}/results'.format(self.id)]
sum_ = data[:, :, 0]
sum_sq = data[:, :, 1]
# Reshape the results arrays
sum_ = np.reshape(sum_, self.shape)
sum_sq = np.reshape(sum_sq, self.shape)
# Set the data for this Tally
self._sum = sum_
self._sum_sq = sum_sq
# Convert NumPy arrays to SciPy sparse LIL matrices
if self.sparse:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), self._sum_sq.shape)
# Indicate that Tally results have been read
self._results_read = True
@property
def sum(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum.toarray(), self.shape)
else:
return self._sum
@property
def sum_sq(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum_sq.toarray(), self.shape)
else:
return self._sum_sq
@property
def mean(self):
if self._mean is None:
if not self._sp_filename:
return None
self._mean = self.sum / self.num_realizations
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self.sparse:
return np.reshape(self._mean.toarray(), self.shape)
else:
return self._mean
@property
def std_dev(self):
if self._std_dev is None:
if not self._sp_filename:
return None
n = self.num_realizations
nonzero = np.abs(self.mean) > 0
self._std_dev = np.zeros_like(self.mean)
self._std_dev[nonzero] = np.sqrt((self.sum_sq[nonzero]/n -
self.mean[nonzero]**2)/(n - 1))
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self.with_batch_statistics = True
if self.sparse:
return np.reshape(self._std_dev.toarray(), self.shape)
else:
return self._std_dev
@property
def with_batch_statistics(self):
return self._with_batch_statistics
@property
def derived(self):
return self._derived
@property
def derivative(self):
return self._derivative
@property
def sparse(self):
return self._sparse
@estimator.setter
def estimator(self, estimator):
cv.check_value('estimator', estimator, ESTIMATOR_TYPES)
self._estimator = estimator
@triggers.setter
def triggers(self, triggers):
cv.check_type('tally triggers', triggers, MutableSequence)
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers',
triggers)
@name.setter
def name(self, name):
cv.check_type('tally name', name, str, none_ok=True)
self._name = name
@derivative.setter
def derivative(self, deriv):
cv.check_type('tally derivative', deriv, openmc.TallyDerivative,
none_ok=True)
self._derivative = deriv
@filters.setter
def filters(self, filters):
cv.check_type('tally filters', filters, MutableSequence)
# If the filter is already in the Tally, raise an error
visited_filters = set()
for f in filters:
if f in visited_filters:
msg = 'Unable to add a duplicate filter "{}" to Tally ID="{}" ' \
'since duplicate filters are not supported in the OpenMC ' \
'Python API'.format(f, self.id)
raise ValueError(msg)
visited_filters.add(f)
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters', filters)
@nuclides.setter
def nuclides(self, nuclides):
cv.check_type('tally nuclides', nuclides, MutableSequence)
# If the nuclide is already in the Tally, raise an error
visited_nuclides = set()
for nuc in nuclides:
if nuc in visited_nuclides:
msg = 'Unable to add a duplicate nuclide "{}" to Tally ID="{}" ' \
'since duplicate nuclides are not supported in the OpenMC ' \
                      'Python API'.format(nuc, self.id)
raise ValueError(msg)
visited_nuclides.add(nuc)
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides',
nuclides)
@scores.setter
def scores(self, scores):
cv.check_type('tally scores', scores, MutableSequence)
visited_scores = set()
for i, score in enumerate(scores):
# If the score is already in the Tally, raise an error
if score in visited_scores:
msg = 'Unable to add a duplicate score "{}" to Tally ID="{}" ' \
'since duplicate scores are not supported in the OpenMC ' \
'Python API'.format(score, self.id)
raise ValueError(msg)
visited_scores.add(score)
# If score is a string, strip whitespace
if isinstance(score, str):
# Check to see if scores are deprecated before storing
for deprecated in ['scatter-', 'nu-scatter-', 'scatter-p',
'nu-scatter-p', 'scatter-y', 'nu-scatter-y',
'flux-y', 'total-y']:
if score.strip().startswith(deprecated):
msg = score.strip() + ' is no longer supported.'
raise ValueError(msg)
scores[i] = score.strip()
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores', scores)
@num_realizations.setter
def num_realizations(self, num_realizations):
cv.check_type('number of realizations', num_realizations, Integral)
cv.check_greater_than('number of realizations', num_realizations, 0, True)
self._num_realizations = num_realizations
@with_summary.setter
def with_summary(self, with_summary):
cv.check_type('with_summary', with_summary, bool)
self._with_summary = with_summary
@with_batch_statistics.setter
def with_batch_statistics(self, with_batch_statistics):
cv.check_type('with_batch_statistics', with_batch_statistics, bool)
self._with_batch_statistics = with_batch_statistics
@sum.setter
def sum(self, sum):
cv.check_type('sum', sum, Iterable)
self._sum = sum
@sum_sq.setter
def sum_sq(self, sum_sq):
cv.check_type('sum_sq', sum_sq, Iterable)
self._sum_sq = sum_sq
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within the Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
# Convert NumPy arrays to SciPy sparse LIL matrices
if sparse and not self.sparse:
if self._sum is not None:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
if self._sum_sq is not None:
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(),
self._sum_sq.shape)
if self._mean is not None:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self._std_dev is not None:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self._sparse = True
# Convert SciPy sparse LIL matrices to NumPy arrays
elif not sparse and self.sparse:
if self._sum is not None:
self._sum = np.reshape(self._sum.toarray(), self.shape)
if self._sum_sq is not None:
self._sum_sq = np.reshape(self._sum_sq.toarray(), self.shape)
if self._mean is not None:
self._mean = np.reshape(self._mean.toarray(), self.shape)
if self._std_dev is not None:
self._std_dev = np.reshape(self._std_dev.toarray(), self.shape)
self._sparse = False
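    # Usage sketch (illustrative, not from the upstream source): toggling
    # ``sparse`` round-trips the tally data between dense NumPy arrays and
    # SciPy LIL matrices; reads always come back as dense arrays.  ``sp`` is
    # a hypothetical openmc.StatePoint; kept as a comment::
    #
    #     tally = sp.get_tally(scores=['flux'])
    #     tally.sparse = True   # store internally as scipy.sparse.lil_matrix
    #     tally.mean.shape      # still the dense (filters, nuclides, scores) shape
    #     tally.sparse = False  # back to plain NumPy arrays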
def remove_score(self, score):
"""Remove a score from the tally
Parameters
----------
score : str
Score to remove
"""
if score not in self.scores:
msg = 'Unable to remove score "{}" from Tally ID="{}" since ' \
'the Tally does not contain this score'.format(score, self.id)
raise ValueError(msg)
self._scores.remove(score)
def remove_filter(self, old_filter):
"""Remove a filter from the tally
Parameters
----------
old_filter : openmc.Filter
Filter to remove
"""
if old_filter not in self.filters:
msg = 'Unable to remove filter "{}" from Tally ID="{}" since the ' \
'Tally does not contain this filter'.format(old_filter, self.id)
raise ValueError(msg)
self._filters.remove(old_filter)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the tally
Parameters
----------
nuclide : openmc.Nuclide
Nuclide to remove
"""
if nuclide not in self.nuclides:
msg = 'Unable to remove nuclide "{}" from Tally ID="{}" since the ' \
'Tally does not contain this nuclide'.format(nuclide, self.id)
raise ValueError(msg)
self._nuclides.remove(nuclide)
def _can_merge_filters(self, other):
"""Determine if another tally's filters can be merged with this one's
The types of filters between the two tallies must match identically.
The bins in all of the filters must match identically, or be mergeable
in only one filter. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable filters
"""
# Two tallies must have the same number of filters
if len(self.filters) != len(other.filters):
return False
# Return False if only one tally has a delayed group filter
tally1_dg = self.contains_filter(openmc.DelayedGroupFilter)
tally2_dg = other.contains_filter(openmc.DelayedGroupFilter)
if tally1_dg != tally2_dg:
return False
# Look to see if all filters are the same, or one or more can be merged
for filter1 in self.filters:
mergeable = False
for filter2 in other.filters:
if filter1 == filter2 or filter1.can_merge(filter2):
mergeable = True
break
# If no mergeable filter was found, the tallies are not mergeable
if not mergeable:
return False
# Tally filters are mergeable if all conditional checks passed
return True
def _can_merge_nuclides(self, other):
"""Determine if another tally's nuclides can be merged with this one's
The nuclides between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable nuclides
"""
no_nuclides_match = True
all_nuclides_match = True
# Search for each of this tally's nuclides in the other tally
for nuclide in self.nuclides:
if nuclide not in other.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Search for each of the other tally's nuclides in this tally
for nuclide in other.nuclides:
if nuclide not in self.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Either all nuclides should match, or none should
return no_nuclides_match or all_nuclides_match
def _can_merge_scores(self, other):
"""Determine if another tally's scores can be merged with this one's
The scores between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable scores
"""
no_scores_match = True
all_scores_match = True
# Search for each of this tally's scores in the other tally
        for score in self.scores:
            if score in other.scores:
                no_scores_match = False
            else:
                all_scores_match = False
# Search for each of the other tally's scores in this tally
for score in other.scores:
if score not in self.scores:
all_scores_match = False
else:
no_scores_match = False
if score == 'current' and score not in self.scores:
return False
# Nuclides cannot be specified on 'flux' scores
if 'flux' in self.scores or 'flux' in other.scores:
if self.nuclides != other.nuclides:
return False
# Either all scores should match, or none should
return no_scores_match or all_scores_match
def can_merge(self, other):
"""Determine if another tally can be merged with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to check for merging
"""
if not isinstance(other, Tally):
return False
# Must have same estimator
if self.estimator != other.estimator:
return False
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
equality = [equal_filters, equal_nuclides, equal_scores]
# If all filters, nuclides and scores match then tallies are mergeable
if all(equality):
return True
# Variables to indicate filter bins, nuclides, and scores that can be merged
can_merge_filters = self._can_merge_filters(other)
can_merge_nuclides = self._can_merge_nuclides(other)
can_merge_scores = self._can_merge_scores(other)
mergeability = [can_merge_filters, can_merge_nuclides, can_merge_scores]
if not all(mergeability):
return False
# If the tally results have been read from the statepoint, at least two
# of filters, nuclides and scores must match
else:
return not self._results_read or sum(equality) >= 2
def merge(self, other):
"""Merge another tally with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to merge with this one
Returns
-------
merged_tally : openmc.Tally
Merged tallies
"""
if not self.can_merge(other):
msg = 'Unable to merge tally ID="{}" with "{}"'.format(
other.id, self.id)
raise ValueError(msg)
# Create deep copy of tally to return as merged tally
merged_tally = copy.deepcopy(self)
# Differentiate Tally with a new auto-generated Tally ID
merged_tally.id = None
# Create deep copy of other tally to use for array concatenation
other_copy = copy.deepcopy(other)
# Identify if filters, nuclides and scores are mergeable and/or equal
merge_filters = self._can_merge_filters(other)
merge_nuclides = self._can_merge_nuclides(other)
merge_scores = self._can_merge_scores(other)
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
# If two tallies can be merged along a filter's bins
if merge_filters and not equal_filters:
# Search for mergeable filters
for i, filter1 in enumerate(self.filters):
for filter2 in other.filters:
if filter1 != filter2 and filter1.can_merge(filter2):
other_copy._swap_filters(other_copy.filters[i], filter2)
merged_tally.filters[i] = filter1.merge(filter2)
join_right = filter1 < filter2
merge_axis = i
break
# If two tallies can be merged along nuclide bins
if merge_nuclides and not equal_nuclides:
merge_axis = self.num_filters
join_right = True
# Add unique nuclides from other tally to merged tally
for nuclide in other.nuclides:
if nuclide not in merged_tally.nuclides:
merged_tally.nuclides.append(nuclide)
# If two tallies can be merged along score bins
if merge_scores and not equal_scores:
merge_axis = self.num_filters + 1
join_right = True
# Add unique scores from other tally to merged tally
for score in other.scores:
if score not in merged_tally.scores:
merged_tally.scores.append(score)
# Add triggers from other tally to merged tally
for trigger in other.triggers:
merged_tally.triggers.append(trigger)
# If results have not been read, then return tally for input generation
if self._results_read is None:
return merged_tally
# Otherwise, this is a derived tally which needs merged results arrays
else:
self._derived = True
# Concatenate sum arrays if present in both tallies
if self.sum is not None and other_copy.sum is not None:
self_sum = self.get_reshaped_data(value='sum')
other_sum = other_copy.get_reshaped_data(value='sum')
if join_right:
merged_sum = np.concatenate((self_sum, other_sum),
axis=merge_axis)
else:
merged_sum = np.concatenate((other_sum, self_sum),
axis=merge_axis)
merged_tally._sum = np.reshape(merged_sum, merged_tally.shape)
# Concatenate sum_sq arrays if present in both tallies
if self.sum_sq is not None and other.sum_sq is not None:
self_sum_sq = self.get_reshaped_data(value='sum_sq')
other_sum_sq = other_copy.get_reshaped_data(value='sum_sq')
if join_right:
merged_sum_sq = np.concatenate((self_sum_sq, other_sum_sq),
axis=merge_axis)
else:
merged_sum_sq = np.concatenate((other_sum_sq, self_sum_sq),
axis=merge_axis)
merged_tally._sum_sq = np.reshape(merged_sum_sq, merged_tally.shape)
# Concatenate mean arrays if present in both tallies
if self.mean is not None and other.mean is not None:
self_mean = self.get_reshaped_data(value='mean')
other_mean = other_copy.get_reshaped_data(value='mean')
if join_right:
merged_mean = np.concatenate((self_mean, other_mean),
axis=merge_axis)
else:
merged_mean = np.concatenate((other_mean, self_mean),
axis=merge_axis)
merged_tally._mean = np.reshape(merged_mean, merged_tally.shape)
# Concatenate std. dev. arrays if present in both tallies
if self.std_dev is not None and other.std_dev is not None:
self_std_dev = self.get_reshaped_data(value='std_dev')
other_std_dev = other_copy.get_reshaped_data(value='std_dev')
if join_right:
merged_std_dev = np.concatenate((self_std_dev, other_std_dev),
axis=merge_axis)
else:
merged_std_dev = np.concatenate((other_std_dev, self_std_dev),
axis=merge_axis)
merged_tally._std_dev = np.reshape(merged_std_dev, merged_tally.shape)
# Sparsify merged tally if both tallies are sparse
merged_tally.sparse = self.sparse and other.sparse
return merged_tally
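    # Sketch of the merge workflow (hypothetical tallies tally1 and tally2 that
    # differ only in their energy filter bins): check mergeability first, since
    # merge() raises ValueError for incompatible tallies.
    #
    #     if tally1.can_merge(tally2):
    #         combined = tally1.merge(tally2)   # new tally with an auto-generated ID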
def to_xml_element(self):
"""Return XML representation of the tally
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing tally data
"""
element = ET.Element("tally")
# Tally ID
element.set("id", str(self.id))
# Optional Tally name
if self.name != '':
element.set("name", self.name)
# Optional Tally filters
if len(self.filters) > 0:
subelement = ET.SubElement(element, "filters")
subelement.text = ' '.join(str(f.id) for f in self.filters)
# Optional Nuclides
if self.nuclides:
subelement = ET.SubElement(element, "nuclides")
subelement.text = ' '.join(str(n) for n in self.nuclides)
# Scores
if len(self.scores) == 0:
msg = 'Unable to get XML for Tally ID="{}" since it does not ' \
'contain any scores'.format(self.id)
raise ValueError(msg)
        else:
            subelement = ET.SubElement(element, "scores")
            subelement.text = ' '.join(str(score) for score in self.scores)
# Tally estimator type
if self.estimator is not None:
subelement = ET.SubElement(element, "estimator")
subelement.text = self.estimator
# Optional Triggers
for trigger in self.triggers:
trigger.get_trigger_xml(element)
# Optional derivatives
if self.derivative is not None:
subelement = ET.SubElement(element, "derivative")
subelement.text = str(self.derivative.id)
return element
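    # Sketch of serializing a tally definition (hypothetical tally object). The
    # returned Element can be attached to a <tallies> tree or written directly
    # with the standard library:
    #
    #     import xml.etree.ElementTree as ET
    #     elem = tally.to_xml_element()
    #     print(ET.tostring(elem))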
def contains_filter(self, filter_type):
"""Looks for a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : bool
True if the tally contains a filter of the requested type;
otherwise false
"""
for test_filter in self.filters:
if type(test_filter) is filter_type:
return True
return False
def find_filter(self, filter_type):
"""Return a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : openmc.Filter
            Filter from this tally with matching type
Raises
------
ValueError
If no matching Filter is found
"""
# Look through all of this Tally's Filters for the type requested
for test_filter in self.filters:
if type(test_filter) is filter_type:
return test_filter
# Also check to see if the desired filter is wrapped up in an
# aggregate
elif isinstance(test_filter, openmc.AggregateFilter):
if isinstance(test_filter.aggregate_filter, filter_type):
return test_filter
# If we did not find the Filter, throw an Exception
msg = 'Unable to find filter type "{}" in Tally ID="{}"'.format(
filter_type, self.id)
raise ValueError(msg)
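    # Sketch of filter lookup (hypothetical tally with an EnergyFilter):
    # contains_filter() returns a bool, while find_filter() returns the filter
    # itself or raises ValueError if no filter of that type is present.
    #
    #     if tally.contains_filter(openmc.EnergyFilter):
    #         energy_filter = tally.find_filter(openmc.EnergyFilter)
    #         print(energy_filter.bins)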
def get_nuclide_index(self, nuclide):
"""Returns the index in the Tally's results array for a Nuclide bin
Parameters
----------
nuclide : str
The name of the Nuclide (e.g., 'H1', 'U238')
Returns
-------
nuclide_index : int
The index in the Tally data array for this nuclide.
Raises
------
KeyError
When the argument passed to the 'nuclide' parameter cannot be found
in the Tally.
"""
# Look for the user-requested nuclide in all of the Tally's Nuclides
for i, test_nuclide in enumerate(self.nuclides):
# If the Summary was linked, then values are Nuclide objects
if isinstance(test_nuclide, openmc.Nuclide):
if test_nuclide.name == nuclide:
return i
# If the Summary has not been linked, then values are ZAIDs
else:
if test_nuclide == nuclide:
return i
msg = ('Unable to get the nuclide index for Tally since "{}" '
'is not one of the nuclides'.format(nuclide))
raise KeyError(msg)
def get_score_index(self, score):
"""Returns the index in the Tally's results array for a score bin
Parameters
----------
score : str
The score string (e.g., 'absorption', 'nu-fission')
Returns
-------
score_index : int
The index in the Tally data array for this score.
Raises
------
ValueError
When the argument passed to the 'score' parameter cannot be found in
the Tally.
"""
try:
score_index = self.scores.index(score)
except ValueError:
msg = 'Unable to get the score index for Tally since "{}" ' \
'is not one of the scores'.format(score)
raise ValueError(msg)
return score_index
def get_filter_indices(self, filters=[], filter_bins=[]):
"""Get indices into the filter axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the filter
axis of the tally's data array (axis=0) for particular combinations
of filters and their corresponding bins.
Parameters
----------
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : Iterable of tuple
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
parameter. Each bin is an integer ID for Material-, Surface-,
Cell-, Cellborn-, and Universe- Filters. Each bin is an integer
for the cell instance ID for DistribcellFilters. Each bin is a
2-tuple of floats for Energy- and Energyout- Filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for MeshFilters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
Returns
-------
numpy.ndarray
A NumPy array of the filter indices
"""
cv.check_type('filters', filters, Iterable, openmc.FilterMeta)
cv.check_type('filter_bins', filter_bins, Iterable, tuple)
# If user did not specify any specific Filters, use them all
if not filters:
return np.arange(self.num_filter_bins)
# Initialize empty list of indices for each bin in each Filter
filter_indices = []
# Loop over all of the Tally's Filters
for i, self_filter in enumerate(self.filters):
# If a user-requested Filter, get the user-requested bins
for j, test_filter in enumerate(filters):
if type(self_filter) is test_filter:
bins = filter_bins[j]
break
else:
# If not a user-requested Filter, get all bins
if isinstance(self_filter, openmc.DistribcellFilter):
# Create list of cell instance IDs for distribcell Filters
bins = list(range(self_filter.num_bins))
elif isinstance(self_filter, openmc.EnergyFunctionFilter):
# EnergyFunctionFilters don't have bins so just add a None
bins = [None]
else:
# Create list of IDs for bins for all other filter types
bins = self_filter.bins
# Add indices for each bin in this Filter to the list
indices = np.array([self_filter.get_bin_index(b) for b in bins])
filter_indices.append(indices)
# Account for stride in each of the previous filters
for indices in filter_indices[:i]:
indices *= self_filter.num_bins
# Apply outer product sum between all filter bin indices
return list(map(sum, product(*filter_indices)))
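    # Worked example of the stride bookkeeping above (hypothetical bin counts):
    # for filters [F1 with 2 bins, F2 with 3 bins], F1 varies slowest on the
    # filter axis, so requesting F1 bin i1 and F2 bin i2 yields index i1*3 + i2.
    # With i1 = 1 and i2 = 2 this selects position 5 of the 6 filter-axis entries.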
def get_nuclide_indices(self, nuclides):
"""Get indices into the nuclide axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the nuclide
axis of the tally's data array (axis=1) for one or more nuclides.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the nuclide indices
"""
cv.check_iterable_type('nuclides', nuclides, str)
# If user did not specify any specific Nuclides, use them all
if not nuclides:
return np.arange(self.num_nuclides)
# Determine the score indices from any of the requested scores
nuclide_indices = np.zeros_like(nuclides, dtype=int)
for i, nuclide in enumerate(nuclides):
nuclide_indices[i] = self.get_nuclide_index(nuclide)
return nuclide_indices
def get_score_indices(self, scores):
"""Get indices into the score axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the score
axis of the tally's data array (axis=2) for one or more scores.
Parameters
----------
scores : list of str or openmc.CrossScore
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the score indices
"""
for score in scores:
if not isinstance(score, (str, openmc.CrossScore)):
msg = 'Unable to get score indices for score "{}" in Tally ' \
'ID="{}" since it is not a string or CrossScore'\
.format(score, self.id)
raise ValueError(msg)
# Determine the score indices from any of the requested scores
if scores:
score_indices = np.zeros(len(scores), dtype=int)
for i, score in enumerate(scores):
score_indices[i] = self.get_score_index(score)
# If user did not specify any specific scores, use them all
else:
score_indices = np.arange(self.num_scores)
return score_indices
def get_values(self, scores=[], filters=[], filter_bins=[],
nuclides=[], value='mean'):
"""Returns one or more tallied values given a list of scores, filters,
filter bins and nuclides.
This method constructs a 3D NumPy array for the requested Tally data
indexed by filter bin, nuclide bin, and score index. The method will
order the data in the array as specified in the parameter lists.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : list of Iterables
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
            parameter. Each bin is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
float or numpy.ndarray
A scalar or NumPy array of the Tally data indexed in the order
each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data,
or the input parameters do not correspond to the Tally's attributes,
e.g., if the score(s) do not match those in the Tally.
"""
# Ensure that the tally has data
if (value == 'mean' and self.mean is None) or \
(value == 'std_dev' and self.std_dev is None) or \
(value == 'rel_err' and self.mean is None) or \
(value == 'sum' and self.sum is None) or \
(value == 'sum_sq' and self.sum_sq is None):
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise ValueError(msg)
# Get filter, nuclide and score indices
filter_indices = self.get_filter_indices(filters, filter_bins)
nuclide_indices = self.get_nuclide_indices(nuclides)
score_indices = self.get_score_indices(scores)
# Construct outer product of all three index types with each other
indices = np.ix_(filter_indices, nuclide_indices, score_indices)
# Return the desired result from Tally
if value == 'mean':
data = self.mean[indices]
elif value == 'std_dev':
data = self.std_dev[indices]
elif value == 'rel_err':
data = self.std_dev[indices] / self.mean[indices]
elif value == 'sum':
data = self.sum[indices]
elif value == 'sum_sq':
data = self.sum_sq[indices]
else:
            msg = 'Unable to return results from Tally ID="{}" since the ' \
                  'requested value "{}" is not \'mean\', \'std_dev\', ' \
'\'rel_err\', \'sum\', or \'sum_sq\''.format(self.id, value)
raise LookupError(msg)
return data
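    # Minimal sketch of get_values() (hypothetical tally with an EnergyFilter
    # and a 'flux' score; bin values mirror the docstring example above):
    #
    #     flux = tally.get_values(scores=['flux'],
    #                             filters=[openmc.EnergyFilter],
    #                             filter_bins=[((0., 0.625e-6),)],
    #                             value='mean')
    #     rel_err = tally.get_values(scores=['flux'], value='rel_err')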
def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True,
derivative=True, paths=True, float_format='{:.2e}'):
"""Build a Pandas DataFrame for the Tally data.
This method constructs a Pandas DataFrame object for the Tally data
with columns annotated by filter, nuclide and score bin information.
This capability has been tested for Pandas >=0.13.1. However, it is
recommended to use v0.16 or newer versions of Pandas since this method
uses the Multi-index Pandas feature.
Parameters
----------
filters : bool
Include columns with filter bin information (default is True).
nuclides : bool
Include columns with nuclide bin information (default is True).
scores : bool
Include columns with score bin information (default is True).
derivative : bool
Include columns with differential tally info (default is True).
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into a
Multi-index column with a geometric "path" to each distribcell
instance.
float_format : str
All floats in the DataFrame will be formatted using the given
format string before printing.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with each column annotated by filter, nuclide and
score bin information (if these parameters are True), and the mean
and standard deviation of the Tally's data.
Raises
------
KeyError
When this method is called before the Tally is populated with data
"""
# Ensure that the tally has data
if self.mean is None or self.std_dev is None:
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise KeyError(msg)
# Initialize a pandas dataframe for the tally data
df = pd.DataFrame()
# Find the total length of the tally data array
data_size = self.mean.size
# Build DataFrame columns for filters if user requested them
if filters:
# Append each Filter's DataFrame to the overall DataFrame
for f, stride in zip(self.filters, self.filter_strides):
filter_df = f.get_pandas_dataframe(
data_size, stride, paths=paths)
df = pd.concat([df, filter_df], axis=1)
# Include DataFrame column for nuclides if user requested it
if nuclides:
nuclides = []
column_name = 'nuclide'
for nuclide in self.nuclides:
if isinstance(nuclide, openmc.Nuclide):
nuclides.append(nuclide.name)
elif isinstance(nuclide, openmc.AggregateNuclide):
nuclides.append(nuclide.name)
column_name = '{}(nuclide)'.format(nuclide.aggregate_op)
else:
nuclides.append(nuclide)
# Tile the nuclide bins into a DataFrame column
nuclides = np.repeat(nuclides, len(self.scores))
tile_factor = data_size / len(nuclides)
df[column_name] = np.tile(nuclides, int(tile_factor))
# Include column for scores if user requested it
if scores:
scores = []
column_name = 'score'
for score in self.scores:
if isinstance(score, (str, openmc.CrossScore)):
scores.append(str(score))
elif isinstance(score, openmc.AggregateScore):
scores.append(score.name)
column_name = '{}(score)'.format(score.aggregate_op)
tile_factor = data_size / len(self.scores)
df[column_name] = np.tile(scores, int(tile_factor))
# Include columns for derivatives if user requested it
if derivative and (self.derivative is not None):
df['d_variable'] = self.derivative.variable
if self.derivative.material is not None:
df['d_material'] = self.derivative.material
if self.derivative.nuclide is not None:
df['d_nuclide'] = self.derivative.nuclide
# Append columns with mean, std. dev. for each tally bin
df['mean'] = self.mean.ravel()
df['std. dev.'] = self.std_dev.ravel()
df = df.dropna(axis=1)
# Expand the columns into Pandas MultiIndices for readability
if pd.__version__ >= '0.16':
columns = copy.deepcopy(df.columns.values)
# Convert all elements in columns list to tuples
for i, column in enumerate(columns):
if not isinstance(column, tuple):
columns[i] = (column,)
# Make each tuple the same length
max_len_column = len(max(columns, key=len))
for i, column in enumerate(columns):
delta_len = max_len_column - len(column)
if delta_len > 0:
new_column = list(column)
new_column.extend(['']*delta_len)
columns[i] = tuple(new_column)
# Create and set a MultiIndex for the DataFrame's columns, but only
# if any column actually is multi-level (e.g., a mesh filter)
if any(len(c) > 1 for c in columns):
df.columns = pd.MultiIndex.from_tuples(columns)
# Modify the df.to_string method so that it prints formatted strings.
# Credit to http://stackoverflow.com/users/3657742/chrisb for this trick
df.to_string = partial(df.to_string, float_format=float_format.format)
return df
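    # Minimal sketch of DataFrame export (hypothetical tally populated with
    # results): each row is one tally bin, annotated with filter, nuclide and
    # score columns plus the mean and std. dev. values.
    #
    #     df = tally.get_pandas_dataframe(float_format='{:.3e}')
    #     print(df.head())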
def get_reshaped_data(self, value='mean'):
"""Returns an array of tally data with one dimension per filter.
The tally data in OpenMC is stored as a 3D array with the dimensions
corresponding to filters, nuclides and scores. As a result, tally data
can be opaque for a user to directly index (i.e., without use of
:meth:`openmc.Tally.get_values`) since one must know how to properly use
the number of bins and strides for each filter to index into the first
(filter) dimension.
This builds and returns a reshaped version of the tally data array with
unique dimensions corresponding to each tally filter. For example,
suppose this tally has arrays of data with shape (8,5,5) corresponding
to two filters (2 and 4 bins, respectively), five nuclides and five
        scores. This method will return a version of the data array with a new
        shape of (2,4,5,5) such that the first two dimensions
correspond directly to the two filters with two and four bins.
Parameters
----------
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
numpy.ndarray
The tally data array indexed by filters, nuclides and scores.
"""
# Get the 3D array of data in filters, nuclides and scores
data = self.get_values(value=value)
# Build a new array shape with one dimension per filter
new_shape = tuple(f.num_bins for f in self.filters)
new_shape += (self.num_nuclides, self.num_scores)
# Reshape the data with one dimension for each filter
data = np.reshape(data, new_shape)
return data
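    # Sketch matching the docstring example above: a tally with filters of 2 and
    # 4 bins, 5 nuclides and 5 scores stores its mean with shape (8, 5, 5), and
    # get_reshaped_data() exposes one axis per filter instead.
    #
    #     data = tally.get_reshaped_data(value='mean')
    #     assert data.shape == (2, 4, 5, 5)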
def hybrid_product(self, other, binary_op, filter_product=None,
nuclide_product=None, score_product=None):
"""Combines filters, scores and nuclides with another tally.
This is a helper method for the tally arithmetic operator overloaded
methods. It is called a "hybrid product" because it performs a
combination of tensor (or Kronecker) and entrywise (or Hadamard)
products. The filters from both tallies are combined using an entrywise
(or Hadamard) product on matching filters. By default, if all nuclides
are identical in the two tallies, the entrywise product is performed
across nuclides; else the tensor product is performed. By default, if
all scores are identical in the two tallies, the entrywise product is
performed across scores; else the tensor product is performed. Users
can also call the method explicitly and specify the desired product.
Parameters
----------
other : openmc.Tally
The tally on the right hand side of the hybrid product
binary_op : {'+', '-', '*', '/', '^'}
The binary operation in the hybrid product
filter_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
filter data. The default is the entrywise product. Currently only
the entrywise product is supported since a tally cannot contain
two of the same filter.
nuclide_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
nuclide data. The default is the entrywise product if all nuclides
between the two tallies are the same; otherwise the default is
the tensor product.
score_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
score data. The default is the entrywise product if all scores
between the two tallies are the same; otherwise the default is
the tensor product.
Returns
-------
openmc.Tally
A new Tally that is the hybrid product with this one.
Raises
------
ValueError
When this method is called before the other tally is populated
with data.
"""
# Set default value for filter product if it was not set
if filter_product is None:
filter_product = 'entrywise'
elif filter_product == 'tensor':
            msg = 'Unable to perform Tally arithmetic with a tensor product ' \
                  'for the filter data as this is not currently supported.'
raise ValueError(msg)
# Set default value for nuclide product if it was not set
if nuclide_product is None:
if self.nuclides == other.nuclides:
nuclide_product = 'entrywise'
else:
nuclide_product = 'tensor'
# Set default value for score product if it was not set
if score_product is None:
if self.scores == other.scores:
score_product = 'entrywise'
else:
score_product = 'tensor'
# Check product types
cv.check_value('filter product', filter_product, _PRODUCT_TYPES)
cv.check_value('nuclide product', nuclide_product, _PRODUCT_TYPES)
cv.check_value('score product', score_product, _PRODUCT_TYPES)
# Check that results have been read
if not other.derived and other.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(other.id)
raise ValueError(msg)
new_tally = Tally()
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally._num_realizations = self.num_realizations
new_tally._estimator = self.estimator
new_tally._with_summary = self.with_summary
new_tally._sp_filename = self._sp_filename
# Construct a combined derived name from the two tally operands
if self.name != '' and other.name != '':
new_name = '({} {} {})'.format(self.name, binary_op, other.name)
new_tally.name = new_name
# Query the mean and std dev so the tally data is read in from file
# if it has not already been read in.
self.mean, self.std_dev, other.mean, other.std_dev
# Create copies of self and other tallies to rearrange for tally
# arithmetic
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
self_copy.sparse = False
other_copy.sparse = False
# Align the tally data based on desired hybrid product
data = self_copy._align_tally_data(other_copy, filter_product,
nuclide_product, score_product)
# Perform tally arithmetic operation
if binary_op == '+':
new_tally._mean = data['self']['mean'] + data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '-':
new_tally._mean = data['self']['mean'] - data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '*':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] * data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '/':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] / data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '^':
with np.errstate(divide='ignore', invalid='ignore'):
mean_ratio = data['other']['mean'] / data['self']['mean']
first_term = mean_ratio * data['self']['std. dev.']
second_term = \
np.log(data['self']['mean']) * data['other']['std. dev.']
new_tally._mean = data['self']['mean'] ** data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(first_term**2 + second_term**2)
# Convert any infs and nans to zero
new_tally._mean[np.isinf(new_tally._mean)] = 0
new_tally._mean = np.nan_to_num(new_tally._mean)
new_tally._std_dev[np.isinf(new_tally._std_dev)] = 0
new_tally._std_dev = np.nan_to_num(new_tally._std_dev)
# Set tally attributes
if self_copy.estimator == other_copy.estimator:
new_tally.estimator = self_copy.estimator
if self_copy.with_summary and other_copy.with_summary:
new_tally.with_summary = self_copy.with_summary
if self_copy.num_realizations == other_copy.num_realizations:
new_tally.num_realizations = self_copy.num_realizations
# Add filters to the new tally
if filter_product == 'entrywise':
for self_filter in self_copy.filters:
new_tally.filters.append(self_filter)
else:
all_filters = [self_copy.filters, other_copy.filters]
for self_filter, other_filter in product(*all_filters):
new_filter = openmc.CrossFilter(self_filter, other_filter,
binary_op)
new_tally.filters.append(new_filter)
# Add nuclides to the new tally
if nuclide_product == 'entrywise':
for self_nuclide in self_copy.nuclides:
new_tally.nuclides.append(self_nuclide)
else:
all_nuclides = [self_copy.nuclides, other_copy.nuclides]
for self_nuclide, other_nuclide in product(*all_nuclides):
new_nuclide = openmc.CrossNuclide(self_nuclide, other_nuclide,
binary_op)
new_tally.nuclides.append(new_nuclide)
# Define helper function that handles score units appropriately
# depending on the binary operator
def cross_score(score1, score2, binary_op):
if binary_op == '+' or binary_op == '-':
if score1 == score2:
return score1
else:
return openmc.CrossScore(score1, score2, binary_op)
else:
return openmc.CrossScore(score1, score2, binary_op)
# Add scores to the new tally
if score_product == 'entrywise':
for self_score in self_copy.scores:
new_score = cross_score(self_score, self_score, binary_op)
new_tally.scores.append(new_score)
else:
all_scores = [self_copy.scores, other_copy.scores]
for self_score, other_score in product(*all_scores):
new_score = cross_score(self_score, other_score, binary_op)
new_tally.scores.append(new_score)
return new_tally
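    # Sketch of calling hybrid_product() directly (hypothetical tallies with
    # different score lists) to force a tensor product across scores instead of
    # the default chosen by the arithmetic operators:
    #
    #     ratio = tally1.hybrid_product(tally2, binary_op='/',
    #                                   score_product='tensor')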
@property
def filter_strides(self):
all_strides = []
stride = self.num_nuclides * self.num_scores
for self_filter in reversed(self.filters):
all_strides.append(stride)
stride *= self_filter.num_bins
return all_strides[::-1]
def _align_tally_data(self, other, filter_product, nuclide_product,
score_product):
"""Aligns data from two tallies for tally arithmetic.
This is a helper method to construct a dict of dicts of the "aligned"
data arrays from each tally for tally arithmetic. The method analyzes
the filters, scores and nuclides in both tallies and determines how to
appropriately align the data for vectorized arithmetic. For example,
if the two tallies have different filters, this method will use NumPy
        'tile' and 'repeat' operations to build new data arrays such that all
possible combinations of the data in each tally's bins will be made
when the arithmetic operation is applied to the arrays.
Parameters
----------
other : openmc.Tally
The tally to outer product with this tally
filter_product : {'entrywise'}
The type of product to be performed between filter data. Currently,
only the entrywise product is supported for the filter product.
nuclide_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
nuclide data.
score_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
score data.
Returns
-------
dict
            A dictionary of dictionaries of the "aligned" 'mean' and 'std. dev.'
NumPy arrays for each tally's data.
"""
# Get the set of filters that each tally is missing
other_missing_filters = set(self.filters) - set(other.filters)
self_missing_filters = set(other.filters) - set(self.filters)
# Add filters present in self but not in other to other
for other_filter in other_missing_filters:
filter_copy = copy.deepcopy(other_filter)
other._mean = np.repeat(other.mean, filter_copy.num_bins, axis=0)
other._std_dev = np.repeat(other.std_dev, filter_copy.num_bins, axis=0)
other.filters.append(filter_copy)
# Add filters present in other but not in self to self
for self_filter in self_missing_filters:
filter_copy = copy.deepcopy(self_filter)
self._mean = np.repeat(self.mean, filter_copy.num_bins, axis=0)
self._std_dev = np.repeat(self.std_dev, filter_copy.num_bins, axis=0)
self.filters.append(filter_copy)
# Align other filters with self filters
for i, self_filter in enumerate(self.filters):
other_index = other.filters.index(self_filter)
# If necessary, swap other filter
if other_index != i:
other._swap_filters(self_filter, other.filters[i])
# Repeat and tile the data by nuclide in preparation for performing
# the tensor product across nuclides.
if nuclide_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_nuclides, axis=1)
self._std_dev = np.repeat(self.std_dev, other.num_nuclides, axis=1)
other._mean = np.tile(other.mean, (1, self.num_nuclides, 1))
other._std_dev = np.tile(other.std_dev, (1, self.num_nuclides, 1))
# Add nuclides to each tally such that each tally contains the complete
# set of nuclides necessary to perform an entrywise product. New
# nuclides added to a tally will have all their scores set to zero.
else:
# Get the set of nuclides that each tally is missing
other_missing_nuclides = set(self.nuclides) - set(other.nuclides)
self_missing_nuclides = set(other.nuclides) - set(self.nuclides)
# Add nuclides present in self but not in other to other
for nuclide in other_missing_nuclides:
other._mean = np.insert(other.mean, other.num_nuclides, 0, axis=1)
other._std_dev = np.insert(other.std_dev, other.num_nuclides, 0,
axis=1)
other.nuclides.append(nuclide)
# Add nuclides present in other but not in self to self
for nuclide in self_missing_nuclides:
self._mean = np.insert(self.mean, self.num_nuclides, 0, axis=1)
self._std_dev = np.insert(self.std_dev, self.num_nuclides, 0,
axis=1)
self.nuclides.append(nuclide)
# Align other nuclides with self nuclides
for i, nuclide in enumerate(self.nuclides):
other_index = other.get_nuclide_index(nuclide)
# If necessary, swap other nuclide
if other_index != i:
other._swap_nuclides(nuclide, other.nuclides[i])
# Repeat and tile the data by score in preparation for performing
# the tensor product across scores.
if score_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_scores, axis=2)
self._std_dev = np.repeat(self.std_dev, other.num_scores, axis=2)
other._mean = np.tile(other.mean, (1, 1, self.num_scores))
other._std_dev = np.tile(other.std_dev, (1, 1, self.num_scores))
# Add scores to each tally such that each tally contains the complete set
# of scores necessary to perform an entrywise product. New scores added
# to a tally will be set to zero.
else:
# Get the set of scores that each tally is missing
other_missing_scores = set(self.scores) - set(other.scores)
self_missing_scores = set(other.scores) - set(self.scores)
# Add scores present in self but not in other to other
for score in other_missing_scores:
other._mean = np.insert(other.mean, other.num_scores, 0, axis=2)
other._std_dev = np.insert(other.std_dev, other.num_scores, 0, axis=2)
other.scores.append(score)
# Add scores present in other but not in self to self
for score in self_missing_scores:
self._mean = np.insert(self.mean, self.num_scores, 0, axis=2)
self._std_dev = np.insert(self.std_dev, self.num_scores, 0, axis=2)
self.scores.append(score)
# Align other scores with self scores
for i, score in enumerate(self.scores):
other_index = other.scores.index(score)
# If necessary, swap other score
if other_index != i:
other._swap_scores(score, other.scores[i])
data = {}
data['self'] = {}
data['other'] = {}
data['self']['mean'] = self.mean
data['other']['mean'] = other.mean
data['self']['std. dev.'] = self.std_dev
data['other']['std. dev.'] = other.std_dev
return data
def _swap_filters(self, filter1, filter2):
"""Reverse the ordering of two filters in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared filters. This method reverses the order of
the two filters in place.
Parameters
----------
filter1 : Filter
The filter to swap with filter2
filter2 : Filter
The filter to swap with filter1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
cv.check_type('filter1', filter1, _FILTER_CLASSES)
cv.check_type('filter2', filter2, _FILTER_CLASSES)
# Check that the filters exist in the tally and are not the same
if filter1 == filter2:
return
elif filter1 not in self.filters:
msg = 'Unable to swap "{}" filter1 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter1.type, self.id)
raise ValueError(msg)
elif filter2 not in self.filters:
msg = 'Unable to swap "{}" filter2 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter2.type, self.id)
raise ValueError(msg)
# Construct lists of tuples for the bins in each of the two filters
filters = [type(filter1), type(filter2)]
if isinstance(filter1, openmc.DistribcellFilter):
filter1_bins = [b for b in range(filter1.num_bins)]
elif isinstance(filter1, openmc.EnergyFunctionFilter):
filter1_bins = [None]
else:
filter1_bins = filter1.bins
if isinstance(filter2, openmc.DistribcellFilter):
filter2_bins = [b for b in range(filter2.num_bins)]
elif isinstance(filter2, openmc.EnergyFunctionFilter):
filter2_bins = [None]
else:
filter2_bins = filter2.bins
# Create variables to store views of data in the misaligned structure
mean = {}
std_dev = {}
# Store the data from the misaligned structure
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
if self.mean is not None:
mean[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='mean')
if self.std_dev is not None:
std_dev[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='std_dev')
# Swap the filters in the copied version of this Tally
filter1_index = self.filters.index(filter1)
filter2_index = self.filters.index(filter2)
self.filters[filter1_index] = filter2
self.filters[filter2_index] = filter1
# Realign the data
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
indices = self.get_filter_indices(filters, filter_bins)
if self.mean is not None:
self.mean[indices, :, :] = mean[i]
if self.std_dev is not None:
self.std_dev[indices, :, :] = std_dev[i]
def _swap_nuclides(self, nuclide1, nuclide2):
"""Reverse the ordering of two nuclides in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared nuclides. This method reverses the order of
the two nuclides in place.
Parameters
----------
nuclide1 : Nuclide
The nuclide to swap with nuclide2
nuclide2 : Nuclide
The nuclide to swap with nuclide1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
cv.check_type('nuclide1', nuclide1, _NUCLIDE_CLASSES)
cv.check_type('nuclide2', nuclide2, _NUCLIDE_CLASSES)
# Check that the nuclides exist in the tally and are not the same
if nuclide1 == nuclide2:
msg = 'Unable to swap a nuclide with itself'
raise ValueError(msg)
elif nuclide1 not in self.nuclides:
msg = 'Unable to swap nuclide1 "{}" in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide1.name, self.id)
raise ValueError(msg)
elif nuclide2 not in self.nuclides:
msg = 'Unable to swap "{}" nuclide2 in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide2.name, self.id)
raise ValueError(msg)
# Swap the nuclides in the Tally
nuclide1_index = self.get_nuclide_index(nuclide1)
nuclide2_index = self.get_nuclide_index(nuclide2)
self.nuclides[nuclide1_index] = nuclide2
self.nuclides[nuclide2_index] = nuclide1
        # Adjust the mean data array to reflect the new nuclide order
if self.mean is not None:
nuclide1_mean = self.mean[:, nuclide1_index, :].copy()
nuclide2_mean = self.mean[:, nuclide2_index, :].copy()
self.mean[:, nuclide2_index, :] = nuclide1_mean
self.mean[:, nuclide1_index, :] = nuclide2_mean
        # Adjust the std_dev data array to reflect the new nuclide order
if self.std_dev is not None:
nuclide1_std_dev = self.std_dev[:, nuclide1_index, :].copy()
nuclide2_std_dev = self.std_dev[:, nuclide2_index, :].copy()
self.std_dev[:, nuclide2_index, :] = nuclide1_std_dev
self.std_dev[:, nuclide1_index, :] = nuclide2_std_dev
def _swap_scores(self, score1, score2):
"""Reverse the ordering of two scores in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared scores. This method reverses the order
of the two scores in place.
Parameters
----------
score1 : str or CrossScore
The score to swap with score2
score2 : str or CrossScore
The score to swap with score1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Check that the scores are valid
if not isinstance(score1, (str, openmc.CrossScore)):
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score1, self.id)
raise ValueError(msg)
elif not isinstance(score2, (str, openmc.CrossScore)):
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score2, self.id)
raise ValueError(msg)
# Check that the scores exist in the tally and are not the same
if score1 == score2:
msg = 'Unable to swap a score with itself'
raise ValueError(msg)
elif score1 not in self.scores:
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score1, self.id)
raise ValueError(msg)
elif score2 not in self.scores:
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score2, self.id)
raise ValueError(msg)
# Swap the scores in the Tally
score1_index = self.get_score_index(score1)
score2_index = self.get_score_index(score2)
self.scores[score1_index] = score2
self.scores[score2_index] = score1
        # Adjust the mean data array to reflect the new score order
if self.mean is not None:
score1_mean = self.mean[:, :, score1_index].copy()
score2_mean = self.mean[:, :, score2_index].copy()
self.mean[:, :, score2_index] = score1_mean
self.mean[:, :, score1_index] = score2_mean
        # Adjust the std_dev data array to reflect the new score order
if self.std_dev is not None:
score1_std_dev = self.std_dev[:, :, score1_index].copy()
score2_std_dev = self.std_dev[:, :, score2_index].copy()
self.std_dev[:, :, score2_index] = score1_std_dev
self.std_dev[:, :, score1_index] = score2_std_dev
def __add__(self, other):
"""Adds this tally to another tally or scalar value.
This method builds a new tally with data that is the sum of this
tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally which is the sum of this tally and the other
tally or scalar value in the addition.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='+')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally.name = self.name
new_tally._mean = self.mean + other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to add "{}" to Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
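    # Sketch of tally arithmetic (hypothetical tallies with results loaded):
    # the operators return new derived tallies with propagated standard
    # deviations, assuming independently distributed data as noted above.
    #
    #     total = tally1 + tally2        # combined filters/nuclides/scores
    #     shifted = tally1 + 10.0        # scalar offset, std. dev. unchanged
    #     rel_diff = (tally1 - tally2) / tally2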
def __sub__(self, other):
"""Subtracts another tally or scalar value from this tally.
This method builds a new tally with data that is the difference of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to subtract from this tally
Returns
-------
openmc.Tally
A new derived tally which is the difference of this tally and the
other tally or scalar value in the subtraction.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='-')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean - other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to subtract "{}" from Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
def __mul__(self, other):
"""Multiplies this tally with another tally or scalar value.
This method builds a new tally with data that is the product of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally which is the product of this tally and the
other tally or scalar value in the multiplication.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='*')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean * other
new_tally._std_dev = self.std_dev * np.abs(other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to multiply Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __truediv__(self, other):
"""Divides this tally by another tally or scalar value.
This method builds a new tally with data that is the dividend of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to divide this tally by
Returns
-------
openmc.Tally
A new derived tally which is the dividend of this tally and the
other tally or scalar value in the division.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='/')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean / other
new_tally._std_dev = self.std_dev * np.abs(1. / other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to divide Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __div__(self, other):
return self.__truediv__(other)
def __pow__(self, power):
"""Raises this tally to another tally or scalar value power.
This method builds a new tally with data that is the power of
this tally's data to that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
power : openmc.Tally or float
The tally or scalar value exponent
Returns
-------
openmc.Tally
A new derived tally which is this tally raised to the power of the
other tally or scalar value in the exponentiation.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(power, Tally):
new_tally = self.hybrid_product(power, binary_op='^')
# If original tally operand was sparse, sparsify the new tally
if self.sparse:
new_tally.sparse = True
elif isinstance(power, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self._mean ** power
self_rel_err = self.std_dev / self.mean
new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If original tally was sparse, sparsify the exponentiated tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to raise Tally ID="{}" to power "{}"'.format(self.id, power)
raise ValueError(msg)
return new_tally
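    # Hedged sketch of the scalar-power branch above (names illustrative):
    # for y = x**p with relative error r = std_dev/mean, the propagated
    # standard deviation is |y * p * r|:
    #
    #     squared = tally ** 2
    #     # squared.std_dev == np.abs(squared.mean * 2 * (tally.std_dev / tally.mean))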
def __radd__(self, other):
"""Right addition with a scalar value.
This reverses the operands and calls the __add__ method.
Parameters
----------
other : float
The scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally of this tally added with the scalar value.
"""
return self + other
def __rsub__(self, other):
"""Right subtraction from a scalar value.
This reverses the operands and calls the __sub__ method.
Parameters
----------
other : float
The scalar value to subtract this tally from
Returns
-------
openmc.Tally
A new derived tally of this tally subtracted from the scalar value.
"""
return -1. * self + other
def __rmul__(self, other):
"""Right multiplication with a scalar value.
This reverses the operands and calls the __mul__ method.
Parameters
----------
other : float
The scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally of this tally multiplied by the scalar value.
"""
return self * other
def __rdiv__(self, other):
"""Right division with a scalar value.
This reverses the operands and calls the __div__ method.
Parameters
----------
other : float
The scalar value to divide by this tally
Returns
-------
openmc.Tally
A new derived tally of the scalar value divided by this tally.
"""
return other * self**-1
def __abs__(self):
"""The absolute value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the absolute value of this tally.
"""
new_tally = copy.deepcopy(self)
new_tally._mean = np.abs(new_tally.mean)
return new_tally
def __neg__(self):
"""The negated value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the negated value of this tally.
"""
new_tally = self * -1
return new_tally
def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[],
squeeze=False):
"""Build a sliced tally for the specified filters, scores and nuclides.
This method constructs a new tally to encapsulate a subset of the data
represented by this tally. The subset of data to include in the tally
slice is determined by the scores, filters and nuclides specified in
the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings (e.g., ['absorption',
            'nu-fission'])
filters : Iterable of openmc.FilterMeta
An iterable of filter types (e.g., [MeshFilter, EnergyFilter])
filter_bins : list of Iterables
A list of iterables of filter bins corresponding to the specified
filter types (e.g., [(1,), ((0., 0.625e-6),)]). Each iterable
contains bins to slice for the corresponding filter type in the
filters parameter. Each bin is the integer ID for 'material',
'surface', 'cell', 'cellborn', and 'universe' Filters. Each bin is
an integer for the cell instance ID for 'distribcell' Filters. Each
bin is a 2-tuple of floats for 'energy' and 'energyout' filters
corresponding to the energy boundaries of the bin of interest. The
bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the
mesh cell of interest. The order of the bins in the list must
correspond to the `filters` argument.
nuclides : list of str
A list of nuclide name strings (e.g., ['U235', 'U238'])
squeeze : bool
Whether to remove filters with only a single bin in the sliced tally
Returns
-------
openmc.Tally
A new tally which encapsulates the subset of data requested in the
order each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Ensure that the tally has data
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Create deep copy of tally to return as sliced tally
new_tally = copy.deepcopy(self)
new_tally._derived = True
# Differentiate Tally with a new auto-generated Tally ID
new_tally.id = None
new_tally.sparse = False
if not self.derived and self.sum is not None:
new_sum = self.get_values(scores, filters, filter_bins,
nuclides, 'sum')
new_tally.sum = new_sum
if not self.derived and self.sum_sq is not None:
new_sum_sq = self.get_values(scores, filters, filter_bins,
nuclides, 'sum_sq')
new_tally.sum_sq = new_sum_sq
if self.mean is not None:
new_mean = self.get_values(scores, filters, filter_bins,
nuclides, 'mean')
new_tally._mean = new_mean
if self.std_dev is not None:
new_std_dev = self.get_values(scores, filters, filter_bins,
nuclides, 'std_dev')
new_tally._std_dev = new_std_dev
# SCORES
if scores:
score_indices = []
# Determine the score indices from any of the requested scores
for score in self.scores:
if score not in scores:
score_index = self.get_score_index(score)
score_indices.append(score_index)
# Loop over indices in reverse to remove excluded scores
for score_index in reversed(score_indices):
new_tally.remove_score(self.scores[score_index])
# NUCLIDES
if nuclides:
nuclide_indices = []
# Determine the nuclide indices from any of the requested nuclides
for nuclide in self.nuclides:
if nuclide.name not in nuclides:
nuclide_index = self.get_nuclide_index(nuclide.name)
nuclide_indices.append(nuclide_index)
# Loop over indices in reverse to remove excluded Nuclides
for nuclide_index in reversed(nuclide_indices):
new_tally.remove_nuclide(self.nuclides[nuclide_index])
# FILTERS
if filters:
# Determine the filter indices from any of the requested filters
for i, filter_type in enumerate(filters):
f = new_tally.find_filter(filter_type)
# Remove filters with only a single bin if requested
if squeeze:
if len(filter_bins[i]) == 1:
new_tally.filters.remove(f)
continue
else:
raise RuntimeError('Cannot remove sliced filter with '
'more than one bin.')
# Remove and/or reorder filter bins to user specifications
bin_indices = [f.get_bin_index(b)
for b in filter_bins[i]]
bin_indices = np.unique(bin_indices)
# Set bins for sliced filter
new_filter = copy.copy(f)
new_filter.bins = [f.bins[i] for i in bin_indices]
# Set number of bins manually for mesh/distribcell filters
if filter_type is openmc.DistribcellFilter:
new_filter._num_bins = f._num_bins
# Replace existing filter with new one
for j, test_filter in enumerate(new_tally.filters):
if isinstance(test_filter, filter_type):
new_tally.filters[j] = new_filter
# If original tally was sparse, sparsify the sliced tally
new_tally.sparse = self.sparse
return new_tally
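    # Hedged usage sketch (score names and filter bins are illustrative):
    #
    #     sliced = tally.get_slice(scores=['fission'],
    #                              filters=[openmc.EnergyFilter],
    #                              filter_bins=[((0., 0.625e-6),)],
    #                              squeeze=True)
    #
    # The slice keeps only the requested score and energy bin; with
    # squeeze=True the now single-binned energy filter is removed.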
def summation(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized sum of tally data across scores, filter bins and/or
nuclides using tally aggregation.
        This method constructs a new tally that encapsulates the sum of the
        data in this tally. The data to sum over are determined by the
        scores, filter bins and nuclides specified
in the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to sum across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to sum across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being summed over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the sum of data requested.
"""
# Create new derived Tally for summation
tally_sum = Tally()
tally_sum._derived = True
tally_sum._estimator = self.estimator
tally_sum._num_realizations = self.num_realizations
tally_sum._with_batch_statistics = self.with_batch_statistics
tally_sum._with_summary = self.with_summary
tally_sum._sp_filename = self._sp_filename
tally_sum._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Sum across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, sum across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only sum across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Sum across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if type(self_filter) == filter_type:
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.sum(mean, axis=i, keepdims=True)
std_dev = np.sum(std_dev**2, axis=i, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally sum
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'sum')
tally_sum.filters.append(filter_sum)
# Add a copy of each filter not summed across to the tally sum
else:
tally_sum.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally sum
else:
tally_sum._filters = copy.deepcopy(self.filters)
# Sum across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally sum
nuclide_sum = openmc.AggregateNuclide(nuclides, 'sum')
tally_sum.nuclides.append(nuclide_sum)
# Add a copy of this tally's nuclides to the tally sum
else:
tally_sum._nuclides = copy.deepcopy(self.nuclides)
# Sum across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally sum
score_sum = openmc.AggregateScore(scores, 'sum')
tally_sum.scores.append(score_sum)
# Add a copy of this tally's scores to the tally sum
else:
tally_sum._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_sum.shape)
std_dev = np.reshape(std_dev, tally_sum.shape)
# Assign tally sum's data with the new arrays
tally_sum._mean = mean
tally_sum._std_dev = std_dev
# If original tally was sparse, sparsify the tally summation
tally_sum.sparse = self.sparse
return tally_sum
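    # Hedged usage sketch (score names illustrative): summing across scores
    # collapses the score axis into one openmc.AggregateScore and combines
    # standard deviations in quadrature:
    #
    #     total = tally.summation(scores=['absorption', 'fission'])
    #     # total.scores -> [AggregateScore(['absorption', 'fission'], 'sum')]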
def average(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized average of tally data across scores, filter bins and/or
nuclides using tally aggregation.
        This method constructs a new tally that encapsulates the average of
        the data in this tally. The data to average over are determined by
        the scores, filter bins and nuclides
specified in the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to average across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to average across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being averaged over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the average of data requested.
"""
# Create new derived Tally for average
tally_avg = Tally()
tally_avg._derived = True
tally_avg._estimator = self.estimator
tally_avg._num_realizations = self.num_realizations
tally_avg._with_batch_statistics = self.with_batch_statistics
tally_avg._with_summary = self.with_summary
tally_avg._sp_filename = self._sp_filename
tally_avg._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Average across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, average across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only average across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Average across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if isinstance(self_filter, filter_type):
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.nanmean(mean, axis=i, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=i, keepdims=True)
std_dev /= len(bin_indices)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally avg
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'avg')
tally_avg.filters.append(filter_sum)
# Add a copy of each filter not averaged across to the tally avg
else:
tally_avg.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally avg
else:
tally_avg._filters = copy.deepcopy(self.filters)
# Sum across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(nuclide_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally avg
nuclide_avg = openmc.AggregateNuclide(nuclides, 'avg')
tally_avg.nuclides.append(nuclide_avg)
# Add a copy of this tally's nuclides to the tally avg
else:
tally_avg._nuclides = copy.deepcopy(self.nuclides)
# Sum across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(score_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally avg
score_sum = openmc.AggregateScore(scores, 'avg')
tally_avg.scores.append(score_sum)
# Add a copy of this tally's scores to the tally avg
else:
tally_avg._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_avg.shape)
std_dev = np.reshape(std_dev, tally_avg.shape)
# Assign tally avg's data with the new arrays
tally_avg._mean = mean
tally_avg._std_dev = std_dev
# If original tally was sparse, sparsify the tally average
tally_avg.sparse = self.sparse
return tally_avg
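    # Hedged sketch: average() mirrors summation() but divides the summed
    # variance by the number of aggregated bins (a standard error under the
    # independence assumption). The filter type below is illustrative:
    #
    #     mean_rate = tally.average(filter_type=openmc.MeshFilter,
    #                               remove_filter=True)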
def diagonalize_filter(self, new_filter, filter_position=-1):
"""Diagonalize the tally data array along a new axis of filter bins.
This is a helper method for the tally arithmetic methods. This method
        adds the new filter to a derived tally copied from this one. The data
        in the derived tally arrays is "diagonalized" along the bins in the
        new filter. This functionality is used by the openmc.mgxs module, e.g., to
transport-correct scattering matrices by subtracting a 'scatter-P1'
reaction rate tally with an energy filter from a 'scatter' reaction
rate tally with both energy and energyout filters.
Parameters
----------
new_filter : Filter
The filter along which to diagonalize the data in the new
filter_position : int
Where to place the new filter in the Tally.filters list. Defaults
to last position.
Returns
-------
openmc.Tally
            A new derived Tally with data diagonalized along the new filter.
"""
cv.check_type('new_filter', new_filter, _FILTER_CLASSES)
cv.check_type('filter_position', filter_position, Integral)
if new_filter in self.filters:
msg = 'Unable to diagonalize Tally ID="{}" which already ' \
'contains a "{}" filter'.format(self.id, type(new_filter))
raise ValueError(msg)
# Add the new filter to a copy of this Tally
new_tally = copy.deepcopy(self)
new_tally.filters.insert(filter_position, new_filter)
# Determine "base" indices along the new "diagonal", and the factor
# by which the "base" indices should be repeated to account for all
# other filter bins in the diagonalized tally
indices = np.arange(0, new_filter.num_bins**2, new_filter.num_bins+1)
diag_factor = self.num_filter_bins // new_filter.num_bins
diag_indices = np.zeros(self.num_filter_bins, dtype=int)
# Determine the filter indices along the new "diagonal"
for i in range(diag_factor):
start = i * new_filter.num_bins
end = (i+1) * new_filter.num_bins
diag_indices[start:end] = indices + (i * new_filter.num_bins**2)
# Inject this Tally's data along the diagonal of the diagonalized Tally
if not self.derived and self.sum is not None:
new_tally._sum = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum[diag_indices, :, :] = self.sum
if not self.derived and self.sum_sq is not None:
new_tally._sum_sq = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum_sq[diag_indices, :, :] = self.sum_sq
if self.mean is not None:
new_tally._mean = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._mean[diag_indices, :, :] = self.mean
if self.std_dev is not None:
new_tally._std_dev = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._std_dev[diag_indices, :, :] = self.std_dev
# If original tally was sparse, sparsify the diagonalized tally
new_tally.sparse = self.sparse
return new_tally
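    # Hedged sketch (tally and filter names illustrative): the openmc.mgxs use
    # case diagonalizes a 'scatter-P1' tally along an outgoing-energy filter so
    # it can be subtracted bin-by-bin from a 'scatter' tally that already has
    # both energy and energyout filters:
    #
    #     p1_diag = scatter_p1.diagonalize_filter(energyout_filter,
    #                                             filter_position=1)
    #     corrected = scatter - p1_diag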
class Tallies(cv.CheckedList):
"""Collection of Tallies used for an OpenMC simulation.
This class corresponds directly to the tallies.xml input file. It can be
thought of as a normal Python list where each member is a :class:`Tally`. It
behaves like a list as the following example demonstrates:
>>> t1 = openmc.Tally()
>>> t2 = openmc.Tally()
>>> t3 = openmc.Tally()
>>> tallies = openmc.Tallies([t1])
>>> tallies.append(t2)
>>> tallies += [t3]
Parameters
----------
tallies : Iterable of openmc.Tally
Tallies to add to the collection
"""
def __init__(self, tallies=None):
super().__init__(Tally, 'tallies collection')
if tallies is not None:
self += tallies
def append(self, tally, merge=False):
"""Append tally to collection
Parameters
----------
tally : openmc.Tally
Tally to append
merge : bool
Indicate whether the tally should be merged with an existing tally,
if possible. Defaults to False.
"""
if not isinstance(tally, Tally):
msg = 'Unable to add a non-Tally "{}" to the ' \
'Tallies instance'.format(tally)
raise TypeError(msg)
if merge:
merged = False
# Look for a tally to merge with this one
for i, tally2 in enumerate(self):
# If a mergeable tally is found
if tally2.can_merge(tally):
# Replace tally2 with the merged tally
merged_tally = tally2.merge(tally)
self[i] = merged_tally
merged = True
break
# If no mergeable tally was found, simply add this tally
if not merged:
super().append(tally)
else:
super().append(tally)
def insert(self, index, item):
"""Insert tally before index
Parameters
----------
index : int
Index in list
item : openmc.Tally
Tally to insert
"""
super().insert(index, item)
def merge_tallies(self):
"""Merge any mergeable tallies together. Note that n-way merges are
possible.
"""
for i, tally1 in enumerate(self):
for j, tally2 in enumerate(self):
# Do not merge the same tally with itself
if i == j:
continue
# If the two tallies are mergeable
if tally1.can_merge(tally2):
# Replace tally 1 with the merged tally
merged_tally = tally1.merge(tally2)
self[i] = merged_tally
# Remove tally 2 since it is no longer needed
self.pop(j)
# Continue iterating from the first loop
break
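    # Hedged usage sketch (t1..t3 assumed mutually mergeable):
    #
    #     tallies = Tallies([t1, t2, t3])
    #     tallies.merge_tallies()
    #     # repeated pairwise merges collapse the compatible tallies into one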
def _create_tally_subelements(self, root_element):
for tally in self:
root_element.append(tally.to_xml_element())
def _create_mesh_subelements(self, root_element):
already_written = set()
for tally in self:
for f in tally.filters:
if isinstance(f, openmc.MeshFilter):
if f.mesh.id not in already_written:
if len(f.mesh.name) > 0:
root_element.append(ET.Comment(f.mesh.name))
root_element.append(f.mesh.to_xml_element())
already_written.add(f.mesh.id)
def _create_filter_subelements(self, root_element):
already_written = dict()
for tally in self:
for f in tally.filters:
if f not in already_written:
root_element.append(f.to_xml_element())
already_written[f] = f.id
elif f.id != already_written[f]:
# Set the IDs of identical filters with different
# user-defined IDs to the same value
f.id = already_written[f]
def _create_derivative_subelements(self, root_element):
# Get a list of all derivatives referenced in a tally.
derivs = []
for tally in self:
deriv = tally.derivative
if deriv is not None and deriv not in derivs:
derivs.append(deriv)
# Add the derivatives to the XML tree.
for d in derivs:
root_element.append(d.to_xml_element())
def export_to_xml(self, path='tallies.xml'):
"""Create a tallies.xml file that can be used for a simulation.
Parameters
----------
path : str
Path to file to write. Defaults to 'tallies.xml'.
"""
root_element = ET.Element("tallies")
self._create_mesh_subelements(root_element)
self._create_filter_subelements(root_element)
self._create_tally_subelements(root_element)
self._create_derivative_subelements(root_element)
# Clean the indentation in the file to be user-readable
clean_indentation(root_element)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'tallies.xml'
# Write the XML Tree to the tallies.xml file
reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(root_element)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
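    # Hedged usage sketch: the collection is typically exported next to the
    # other model XML files before running OpenMC:
    #
    #     tallies = Tallies([tally])
    #     tallies.export_to_xml()          # writes ./tallies.xml
    #     tallies.export_to_xml('inputs')  # appends tallies.xml if 'inputs' is a directory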
| mit |
zbarge/zeex | zeex/core/views/file.py | 1 | 6522 | """
MIT License
Copyright (c) 2016 Zeke Barge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import zeex.core.utility.pandatools as pandatools
from zeex.core.compat import QtGui
from zeex.core.ui.file_ui import Ui_FileWindow
from zeex.core.utility.widgets import create_standard_item_model
from zeex.core.views.actions.analyze import FileAnalyzerDialog
from zeex.core.views.actions.export import DataFrameModelExportDialog
from zeex.core.views.actions.fields_edit import FieldsEditDialog
from zeex.core.views.actions.merge_purge import MergePurgeDialog
from zeex.core.views.actions.normalize import ColumnNormalizerDialog
from zeex.core.views.actions.split import SplitFileDialog
from zeex.icons import icons_rc
from zeex.core.ctrls.dataframe import DataFrameModel
from qtpandas.views.DataTableView import DataTableWidget
class FileTableWindow(QtGui.QMainWindow, Ui_FileWindow):
"""
A spreadsheet-like window that displays rows and columns
of the source DataFrame. Menu actions in this window allow the user to make
updates to the DataFrame and see the changes update in the view.
"""
def __init__(self, model: DataFrameModel, df_manager, **kwargs):
QtGui.QMainWindow.__init__(self, parent=kwargs.pop('parent', None))
self.df_manager = df_manager
self._df_model = None
self._df_model_transposed = None
self._view_transposed = False
self._widget = DataTableWidget()
self._widget.setModel(model)
kwargs['parent'] = self
self.setupUi(self)
self.dialog_fields_edit = FieldsEditDialog(model, parent=self)
self.dialog_export = DataFrameModelExportDialog(df_manager, filename=model.filePath,
allow_multi_source=False, parent=self)
self.dialog_split = SplitFileDialog(model, parent=self)
self.dialog_analyze = FileAnalyzerDialog(model, parent=self)
self.dialog_normalize = ColumnNormalizerDialog(model, parent=self)
self.dialog_merge_purge = kwargs.pop('merge_purge_dialog', MergePurgeDialog(df_manager,
source_model=model,
))
self.connect_actions()
self.connect_icons()
@property
def widget(self):
# Overrides the Ui_FileWindow.widget
return self._widget
@widget.setter
def widget(self, x):
# Prevent the Ui_FileWindow from overriding our widget.
pass
@property
def df_model(self) -> DataFrameModel:
return self.widget.model()
@property
def df(self):
return self.df_model.dataFrame()
def connect_actions(self):
self.actionAnalyze.triggered.connect(self.dialog_analyze.show)
self.actionDropNaN.triggered.connect(self.drop_nan)
self.actionEditFields.triggered.connect(self.dialog_fields_edit.show)
self.actionMergePurge.triggered.connect(self.dialog_merge_purge.show)
self.actionNormalize.triggered.connect(self.dialog_normalize.show)
self.actionSave.triggered.connect(self.save)
self.actionSaveAs.triggered.connect(self.dialog_export.show)
self.actionSplit.triggered.connect(self.dialog_split.show)
self.actionTranspose.triggered.connect(self.transpose)
self.dialog_export.btnBrowseSource.setVisible(False)
# TODO: Make these actions do something then activate.
self.actionExecuteScript.setVisible(False)
self.actionSuppress.setVisible(False)
self.actionDelete.setVisible(False)
self.df_model.dataChanged.connect(self.sync)
self.sync()
def connect_icons(self):
self.setWindowIcon(QtGui.QIcon(':/standard_icons/spreadsheet.png'))
def transpose(self):
"""
TODO: Give this functionality to the DataFrameModel
:return:
"""
rows = self.df_model.dataFrame().index.size
if rows > 150:
            raise Exception("Cannot transpose more than 150 rows to columns.")
if self._df_model_transposed is None:
df = pandatools.dataframe_transpose(self.df_model.dataFrame())
self._df_model_transposed = DataFrameModel(dataFrame=df)
self._df_model = self.df_model
if self._view_transposed is True:
self.widget.setModel(self._df_model)
self._view_transposed = False
else:
self.widget.setModel(self._df_model_transposed)
self._view_transposed = True
def drop_nan(self):
for c in self.df.columns:
dtype = str(self.df[c].dtype)
if 'float' in dtype:
                col_sum = self.df[c].sum()
                if col_sum == 0 or str(col_sum) == 'nan':
self.df.loc[:, c] = self.df.loc[:, c].apply(lambda x: str(x))
if str(self.df[c].dtype) == 'object':
self.df.loc[:, c] = pandatools.series_blank_na(self.df.loc[:, c])
self.df_model.dataChanged.emit()
def sync(self):
fp = self.df_model.filePath
self.setWindowTitle(fp)
cb_model = create_standard_item_model([fp])
self.dialog_export.comboBoxSource.setModel(cb_model)
self._df_model_transposed = None
if self._df_model is not None:
self.widget.setModel(self._df_model)
def save(self):
self.dialog_export.set_destination_path_from_source()
self.dialog_export.export()
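    # Hedged usage sketch (assumes a pandas DataFrame `df` and an application
    # df_manager; purely illustrative):
    #
    #     model = DataFrameModel(dataFrame=df)
    #     window = FileTableWindow(model, df_manager)
    #     window.show()
    #
    # Menu actions then mutate the shared model, and sync() keeps the window
    # title and export dialog pointed at the model's current file path.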
| mit |
mapazarr/astropy_scripts | astropy_scripts/test_prod_bg_cube_models.py | 1 | 6735 | from __future__ import (absolute_import, division, print_function,
unicode_literals) # python 2 as python 3
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.units import Quantity
from astropy.coordinates import Angle
from gammapy.scripts import make_bg_cube_models
from gammapy.background import Cube, CubeBackgroundModel
from gammapy.obs import ObservationGroups
from gammapy.datasets import make_test_dataset
DEBUG = 0 # 0: normal, 1: run fast (test mode)
GRAPH_DEBUG = 1 # 0: no plots, 1: make plots, 2: wait between steps (bins), 3: draw 3D scatter plots (not implemented)
CLEAN_WORKING_DIR = 1 # remove existing observation and bg cube model files
USE_DUMMY_DATA = 0 # to use dummy dataset
METHOD = 'default' # to use the default method to produce the cubes
#METHOD = 'michi' # to use Michael Mayer's method to produce the cubes
HESSFITS_MPP = '/home/mapaz/astropy/gammapy_tutorial/HESS_fits_data/pa/Model_Deconvoluted_Prod26/Mpp_Std'
DUMMYFITS = '/home/mapaz/astropy/development_code/astropy_scripts/astropy_scripts/' + 'test_dataset'
SCHEME = 'HESS'
OBSERVATORY_NAME = 'HESS' # in case USE_DUMMY_DATA is activated
def bg_cube_models_debug_plots(indir):
"""Make some debug plots of the generated background cube models.
Parameters
----------
indir : str
Dir path where results are stored.
"""
# TODO: call plot_bg_cube_model_comparison !!!
print()
print("#######################################")
print("# Starting bg_cube_models_debug_plots #")
print("#######################################")
# read observation grouping
infile = os.path.join(indir, 'bg_observation_groups.ecsv')
observation_groups = ObservationGroups.read(infile)
# loop over observation groups
groups = observation_groups.list_of_groups
print()
print("list of groups", groups)
for group in groups:
print()
print("group", group)
# read bg cube model from file
infile = os.path.join(indir,
                              'bg_cube_model_group{}_table.fits.gz'.format(group))
# skip bins with no bg cube model file
if not os.path.isfile(infile):
print("WARNING, file not found: {}".format(infile))
continue # skip the rest
#bg_cube_model = Cube.read(infile, format='table', scheme='bg_cube')
        bg_cube_model = CubeBackgroundModel.read(infile, format='table').background_cube
# TODO: I could actually plot also the events (counts) or livetime cubes!!!
fig, axes = plt.subplots(nrows=1, ncols=3)
fig.set_size_inches(30., 8., forward=True)
#plt.suptitle('altitude bin {0} azimuth bin {1}'.format(i_alt, i_az))
#plt.suptitle('group {}'.format(group))
group_info = observation_groups.info_group(group)
plt.suptitle(group_info)
# TODO: it would be nice to get a nice string from an obs group!!!
# and in this case pack it in the figure title
# I think this also applies to the script for comparing plots!!!
# plot images
#bg_cube_model.plot_image(energy=Quantity(0.5, 'TeV'), ax=axes[0])
bg_cube_model.plot_image(energy=Quantity(5., 'TeV'), ax=axes[0])
bg_cube_model.plot_image(energy=Quantity(50., 'TeV'), ax=axes[1])
# plot spectra
bg_cube_model.plot_spectrum(coord=Angle([0., 0.], 'degree'), ax=axes[2],
style_kwargs=dict(color='blue',
label='(0, 0) deg'))
bg_cube_model.plot_spectrum(coord=Angle([2., 2.], 'degree'), ax=axes[2],
style_kwargs=dict(color='red',
label='(2, 2) deg'))
axes[2].set_title('')
axes[2].legend()
#plt.tight_layout()
plt.draw()
if GRAPH_DEBUG > 1:
plt.show() # wait until image is closed
if GRAPH_DEBUG:
plt.show() # don't leave at the end
def test_make_bg_cube_models():
"""
gammapy-make-bg-cube-models -h
gammapy-make-bg-cube-models /home/mapaz/astropy/gammapy_tutorial/HESS_fits_data/pa/Model_Deconvoluted_Prod26/Mpp_Std HESS bg_cube_models
gammapy-make-bg-cube-models /home/mapaz/astropy/gammapy_tutorial/HESS_fits_data/pa/Model_Deconvoluted_Prod26/Mpp_Std HESS bg_cube_models --test
gammapy-make-bg-cube-models /home/mapaz/astropy/gammapy_tutorial/HESS_fits_data/pa/Model_Deconvoluted_Prod26/Mpp_Std HESS bg_cube_models --test --overwrite
gammapy-make-bg-cube-models /home/mapaz/astropy/gammapy_tutorial/HESS_fits_data/pa/Model_Deconvoluted_Prod26/Mpp_Std HESS bg_cube_models --a-la-michi
"""
# remove old files
# make sure the working dir is clean, otherwise old
# files could be mixed up in the new models!
if CLEAN_WORKING_DIR:
print("Cleaning working dir.")
#command = "rm bg_observation_table* bg_cube_model_alt* -fr"
#command = "rm bg_observation_table.fits.gz splitted_obs_list/ bg_cube_models/ -fr"
#command = "rm bg_observation_table.fits.gz bg_observation_groups.ecsv bg_observation_table_grouped.fits.gz bg_cube_models/ -fr"
command = "rm bg_cube_models/ -fr"
print(command)
os.system(command)
if USE_DUMMY_DATA:
print("Deleting dummy data dir.")
command = "rm test_dataset -fr"
print(command)
os.system(command)
test = False
if DEBUG:
# run fast (test mode)
test = True
fits_path = HESSFITS_MPP
#outdir = os.path.join(os.environ['PWD'], 'bg_cube_models')
outdir = 'bg_cube_models'
overwrite = False
if USE_DUMMY_DATA:
# update fits path and generate dataset
fits_path = DUMMYFITS
n_obs = 10
if DEBUG:
# run fast (test mode)
n_obs = 2
datestart = None
dateend = None
#random_state = 'random-seed'
random_state = np.random.RandomState(seed=0)
#random_state = 0 # this is equivalent
make_test_dataset(fits_path=fits_path,
overwrite=overwrite,
observatory_name=OBSERVATORY_NAME,
n_obs=n_obs,
datestart=datestart,
dateend=dateend,
random_state=random_state)
make_bg_cube_models(fitspath=fits_path, scheme=SCHEME, outdir=outdir, overwrite=overwrite, test=test, method=METHOD)
if GRAPH_DEBUG:
# check model: do some plots
bg_cube_models_debug_plots(indir=outdir)
if __name__ == '__main__':
test_make_bg_cube_models()
| gpl-2.0 |
murali-munna/scikit-learn | sklearn/utils/validation.py | 67 | 24013 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
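# Hedged usage sketch (illustrative): nested lists are converted to a 2-d
# array; passing ensure_2d=False keeps 1-d input one-dimensional:
#
#     X = check_array([[1, 2], [3, 4]], dtype=np.float64)   # shape (2, 2)
#     v = check_array([1, 2, 3], ensure_2d=False)           # shape (3,)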
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
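# Hedged usage sketch: estimators typically validate both arrays together at
# the top of fit() so that X is 2-d, y is 1-d and their lengths agree:
#
#     X, y = check_X_y([[0., 1.], [1., 0.]], [0, 1])
#     # X.shape == (2, 2), y.shape == (2,)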
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
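# Hedged usage sketch: an int seed gives a reproducible generator, while an
# existing RandomState instance is passed through unchanged:
#
#     rng = check_random_state(0)
#     rng.rand(3)                         # reproducible draws
#     assert check_random_state(rng) is rng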
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
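# Hedged usage sketch: a mildly asymmetric affinity matrix is symmetrized by
# averaging with its transpose (raising a warning by default):
#
#     A = np.array([[0., 1.], [1.001, 0.]])
#     S = check_symmetric(A)    # equals 0.5 * (A + A.T)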
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
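# Hedged usage sketch (attribute name illustrative): predict() implementations
# typically guard on fitted attributes before touching the data:
#
#     def predict(self, X):
#         check_is_fitted(self, 'coef_')
#         ...
#
# Calling this on an unfitted estimator raises NotFittedError.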
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
zeratul2099/plist-qt | listview.py | 1 | 10898 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
from puente.plist.models import Customer, PriceList, PlistSettings, Transaction
from puente.plist.views import renderPlot
from puente.pmenu.models import MenuItem
if os.environ.get('QT_API') == 'pyside':
from PySide.QtCore import *
from PySide.QtGui import *
else:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
from datetime import datetime, timedelta, date
from decimal import Decimal
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from primitives import *
from main_elements import *
from dialogs import *
class MainWindow(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowIcon(QIcon('img/32x32/wallet-open.png'))
self.setWindowTitle('PList-QT')
self._update_weekly_sales()
self.customers = Customer.objects.filter(isPuente=False).order_by('name').reverse()
self.prices = PriceList.objects.filter(isPuente=False).order_by('price')
self.p_men = Customer.objects.filter(isPuente=True).order_by('name').reverse()
self.p_prices = PriceList.objects.filter(isPuente=True).order_by('price')
self.c_menu_items = dict()
self.p_menu_items = dict()
self._get_menu_item_dict()
self.settings = PlistSettings.objects.all()[0]
layout = QVBoxLayout()
self.center_widget = QWidget(parent=self)
self.center_widget.resize(1024,600)
self.toolbar = PlistToolbar()
self.p_men_box = CustomerListBlockWidget(self.p_men, self.p_prices, 'Puente', self.settings, product_dict=self.p_menu_items)
self.customer_box = CustomerListBlockWidget(self.customers, self.prices, 'Customer', self.settings, product_dict=self.c_menu_items)
self.p_men_box.table.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Preferred)
self.p_men_box.table.adjustSize()
self.connect(self.toolbar.new_customer_dialog, SIGNAL('newCustomer()'), self.update)
self.connect(self.toolbar.settings_dialog, SIGNAL('settingsChanged()'), self.settings_changed)
self.connect(self.toolbar.settings_dialog.c_price_widget.add_button, SIGNAL('clicked()'), self.add_price)
self.connect(self.toolbar.settings_dialog.p_price_widget.add_button, SIGNAL('clicked()'), self.add_price)
self.connect(self.toolbar.settings_dialog.c_price_widget, SIGNAL('settingsChanged()'), self.update)
self.connect(self.toolbar.settings_dialog.p_price_widget, SIGNAL('settingsChanged()'), self.update)
self.connect(self.toolbar.menu_dialog.table, SIGNAL('settingsChanged()'), self.update)
self.connect(self.p_men_box.table, SIGNAL('customerDeleted()'), self.update)
self.connect(self.customer_box.table, SIGNAL('customerDeleted()'), self.update)
self.connect(self.p_men_box.table, SIGNAL('customerChanged()'), self.p_men_box.details_dialog.customer_updated)
self.connect(self.customer_box.table, SIGNAL('customerChanged()'), self.customer_box.details_dialog.customer_updated)
self.connect(self.p_men_box.details_dialog, SIGNAL('customerEdited()'), self.update)
self.connect(self.customer_box.details_dialog, SIGNAL('customerEdited()'), self.update)
layout.addWidget(self.toolbar)
splitter = QSplitter()
splitter.setOrientation(Qt.Vertical)
splitter.addWidget(self.p_men_box)
splitter.addWidget(self.customer_box)
layout.addWidget(splitter)
self.center_widget.setLayout(layout)
def add_price(self):
if self.sender() is self.toolbar.settings_dialog.c_price_widget.add_button:
is_puente = False
num = float(self.toolbar.settings_dialog.c_price_widget.new_price_field.text().replace(',','.'))*100
self.toolbar.settings_dialog.c_price_widget.new_price_field.setText('')
elif self.sender() is self.toolbar.settings_dialog.p_price_widget.add_button:
is_puente = True
num = float(self.toolbar.settings_dialog.p_price_widget.new_price_field.text().replace(',','.'))*100
self.toolbar.settings_dialog.p_price_widget.new_price_field.setText('')
else:
return
price = PriceList(price=num, isPuente=is_puente, settings=self.settings)
price.save()
self.update()
self.toolbar.settings_dialog.update(self.settings, self.prices, self.p_prices)
def resizeEvent(self, event):
self.center_widget.resize(event.size())
def update(self):
self.customers = Customer.objects.filter(isPuente=False).order_by('name').reverse()
self.prices = PriceList.objects.filter(isPuente=False).order_by('price')
self.p_men = Customer.objects.filter(isPuente=True).order_by('name').reverse()
self.p_prices = PriceList.objects.filter(isPuente=True).order_by('price')
self.settings = PlistSettings.objects.all()[0]
self._get_menu_item_dict()
self.p_men_box.update(self.p_men, self.p_prices, self.settings, self.p_menu_items)
self.customer_box.update(self.customers, self.prices, self.settings, self.c_menu_items)
#self.p_men_box.table.adjustSize()
def settings_changed(self):
self.update()
for c in self.customers:
self.customer_box.table.update_customer_status(c)
for p in self.p_men:
self.p_men_box.table.update_customer_status(p)
def _get_menu_item_dict(self):
self.c_menu_items = dict()
self.p_menu_items = dict()
for item in MenuItem.objects.filter(available=True):
if item.price in self.c_menu_items:
self.c_menu_items[item.price].append(item.name)
else:
self.c_menu_items[item.price] = [item.name]
if item.pPrice in self.p_menu_items:
self.p_menu_items[item.pPrice].append(item.name)
else:
self.p_menu_items[item.pPrice] = [item.name]
def _update_weekly_sales(self):
        ''' Update the weeklySales attribute of every customer. Running this once per program startup should be enough. '''
for c in Customer.objects.all():
if date.today() - c.salesSince > timedelta(7):
while c.salesSince + timedelta(7) < date.today():
c.salesSince = c.salesSince + timedelta(7)
c.weeklySales = 0
c.save()
class PlistToolbar(QToolBar):
def __init__(self):
QToolBar.__init__(self)
self.new_customer_dialog = NewCustomerDialog()
self.all_stats_dialog = StatsDialog()
self.customer_stats_dialog = StatsDialog()
self.team_stats_dialog = StatsDialog()
self.settings_dialog = SettingsDialog()
self.menu_dialog = MenuEditDialog()
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
new_customer_action = QAction(QIcon('img/16x16/user-group-new.png'), 'New customer',self)
self.connect(new_customer_action, SIGNAL('triggered()'), self.new_customer_dialog.show)
self.addAction(new_customer_action)
show_customer_stats_action = QAction(QIcon.fromTheme('office-chart-bar', QIcon('img/16x16/view-statistics.png')), 'Statistics customer', self)
self.connect(show_customer_stats_action, SIGNAL('triggered()'), self.show_customer_stats)
self.addAction(show_customer_stats_action)
show_team_stats_action = QAction(QIcon.fromTheme('office-chart-bar', QIcon('img/16x16/view-statistics.png')), 'Statistics team', self)
self.connect(show_team_stats_action, SIGNAL('triggered()'), self.show_team_stats)
self.addAction(show_team_stats_action)
show_all_stats_action = QAction(QIcon.fromTheme('office-chart-bar', QIcon('img/16x16/view-statistics.png')), 'Statistics sum', self)
self.connect(show_all_stats_action, SIGNAL('triggered()'), self.show_all_stats)
self.addAction(show_all_stats_action)
show_settings_action = QAction(QIcon('img/16x16/configure.png'), 'Settings', self)
self.connect(show_settings_action, SIGNAL('triggered()'), self.show_settings)
self.addAction(show_settings_action)
show_menu_Edit_action = QAction(QIcon('img/16x16/wine.png'), 'Menu', self)
self.connect(show_menu_Edit_action, SIGNAL('triggered()'), self.show_menu_edit)
self.addAction(show_menu_Edit_action)
show_about_action = QAction(QIcon.fromTheme('help-about'), 'About', self)
self.connect(show_about_action, SIGNAL('triggered()'), self.show_about)
#self.addAction(show_about_action)
def show_about(self):
self.about_dialog = QDialog()
QLabel('Plist-QT', parent=self.about_dialog)
self.about_dialog.show()
def show_settings(self):
main_window = self.parent().parent()
self.settings_dialog.update(main_window.settings, main_window.prices, main_window.p_prices)
self.settings_dialog.show()
def show_all_stats(self):
transactions = Transaction.objects.order_by("time").reverse()
self.all_stats_dialog.update(transactions)
self.all_stats_dialog.setWindowTitle('All statistics')
self.all_stats_dialog.show()
def show_customer_stats(self):
transactions = Transaction.objects.filter(customer__isPuente=False).order_by("time").reverse()
self.all_stats_dialog.update(transactions)
self.all_stats_dialog.setWindowTitle('Customer statistics')
self.all_stats_dialog.show()
def show_team_stats(self):
transactions = Transaction.objects.filter(customer__isPuente=True).order_by("time").reverse()
self.all_stats_dialog.update(transactions)
self.all_stats_dialog.setWindowTitle('Team statistics')
self.all_stats_dialog.show()
def show_menu_edit(self):
self.menu_dialog.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| gpl-3.0 |
fyffyt/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irrelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    projector = np.dot(np.dot(X_irrelevant.T, X_relevant),
                       pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
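# Editorial gloss (not part of the original example): writing x_j for an
# irrelevant column, the quantity computed above is
#     mu(X) = max_j || (X_relevant^T X_relevant)^{-1} X_relevant^T x_j ||_1,
# i.e. the largest l1-norm among the regression coefficients of the irrelevant
# columns on the relevant ones; irrepresentable-type conditions for L1-recovery
# typically require mu(X) < 1 (see [Wainwright2006] for the exact statement).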
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/km_class.py | 31 | 11748 | #a class for the Kaplan-Meier estimator
from statsmodels.compat.python import range
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
def __init__(self, data, timesIn, groupIn, censoringIn):
raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
#store the inputs
self.data = data
self.timesIn = timesIn
self.groupIn = groupIn
self.censoringIn = censoringIn
def fit(self):
#split the data into groups based on the predicting variable
#get a set of all the groups
groups = list(set(self.data[:,self.groupIn]))
#create an empty list to store the data for different groups
groupList = []
#create an empty list for each group and add it to groups
for i in range(len(groups)):
groupList.append([])
#iterate through all the groups in groups
for i in range(len(groups)):
            #iterate through the rows of dataArray
for j in range(len(self.data)):
#test if this row has the correct group
if self.data[j,self.groupIn] == groups[i]:
#add the row to groupList
groupList[i].append(self.data[j])
#create an empty list to store the times for each group
timeList = []
#iterate through all the groups
for i in range(len(groupList)):
#create an empty list
times = []
#iterate through all the rows of the group
for j in range(len(groupList[i])):
#get a list of all the times in the group
times.append(groupList[i][j][self.timesIn])
#get a sorted set of the times and store it in timeList
times = list(sorted(set(times)))
timeList.append(times)
#get a list of the number at risk and events at each time
#create an empty list to store the results in
timeCounts = []
#create an empty list to hold points for plotting
points = []
#create a list for points where censoring occurs
censoredPoints = []
        #iterate through each group
for i in range(len(groupList)):
#initialize a variable to estimate the survival function
survival = 1
#initialize a variable to estimate the variance of
#the survival function
varSum = 0
#initialize a counter for the number at risk
riskCounter = len(groupList[i])
#create a list for the counts for this group
counts = []
##create a list for points to plot
x = []
y = []
#iterate through the list of times
for j in range(len(timeList[i])):
if j != 0:
if j == 1:
#add an indicator to tell if the time
#starts a new group
groupInd = 1
#add (0,1) to the list of points
x.append(0)
y.append(1)
#add the point time to the right of that
x.append(timeList[i][j-1])
y.append(1)
#add the point below that at survival
x.append(timeList[i][j-1])
y.append(survival)
#add the survival to y
y.append(survival)
else:
groupInd = 0
#add survival twice to y
y.append(survival)
y.append(survival)
#add the time twice to x
x.append(timeList[i][j-1])
x.append(timeList[i][j-1])
#add each censored time, number of censorings and
#its survival to censoredPoints
censoredPoints.append([timeList[i][j-1],
censoringNum,survival,groupInd])
#add the count to the list
counts.append([timeList[i][j-1],riskCounter,
eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#increment the number at risk
riskCounter += -1*(riskChange)
#initialize a counter for the change in the number at risk
riskChange = 0
#initialize a counter to zero
eventCounter = 0
                #initialize a counter to tell when censoring occurs
censoringCounter = 0
censoringNum = 0
#iterate through the observations in each group
for k in range(len(groupList[i])):
                    #check if the observation has the given time
if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
#increment the number at risk counter
riskChange += 1
#check if this is an event or censoring
if groupList[i][k][self.censoringIn] == 1:
#add 1 to the counter
eventCounter += 1
else:
censoringNum += 1
#check if there are any events at this time
if eventCounter != censoringCounter:
censoringCounter = eventCounter
#calculate the estimate of the survival function
survival *= ((float(riskCounter) -
eventCounter)/(riskCounter))
try:
#calculate the estimate of the variance
varSum += (eventCounter)/((riskCounter)
*(float(riskCounter)-
eventCounter))
except ZeroDivisionError:
varSum = 0
#append the last row to counts
counts.append([timeList[i][len(timeList[i])-1],
riskCounter,eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#add the last time once to x
x.append(timeList[i][len(timeList[i])-1])
x.append(timeList[i][len(timeList[i])-1])
#add the last survival twice to y
y.append(survival)
#y.append(survival)
censoredPoints.append([timeList[i][len(timeList[i])-1],
censoringNum,survival,1])
            #add the list for the group to a list for all the groups
timeCounts.append(np.array(counts))
points.append([x,y])
#returns a list of arrays, where each array has as it columns: the time,
#the number at risk, the number of events, the estimated value of the
#survival function at that time, and the estimated standard error at
#that time, in that order
self.results = timeCounts
self.points = points
self.censoredPoints = censoredPoints
def plot(self):
x = []
#iterate through the groups
for i in range(len(self.points)):
#plot x and y
plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
#create lists of all the x and y values
x += self.points[i][0]
for j in range(len(self.censoredPoints)):
            #check if censoring is occurring
if (self.censoredPoints[j][1] != 0):
#if this is the first censored point
if (self.censoredPoints[j][3] == 1) and (j == 0):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this censored point starts a new group
elif ((self.censoredPoints[j][3] == 1) and
(self.censoredPoints[j-1][3] == 1)):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is the last censored point
elif j == (len(self.censoredPoints) - 1):
#calculate a distance beyond the previous time
#so that all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is a point in the middle of the group
else:
                    #calculate a distance beyond the current time
#to place the point, so they all fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j+1][0])
- self.censoredPoints[j][0]))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#set the size of the plot so it extends to the max x and above 1 for y
plt.xlim((0,np.max(x)))
plt.ylim((0,1.05))
#label the axes
plt.xlabel('time')
plt.ylabel('survival')
plt.show()
def show_results(self):
#start a string that will be a table of the results
resultsString = ''
#iterate through all the groups
for i in range(len(self.results)):
#label the group and header
resultsString += ('Group {0}\n\n'.format(i) +
'Time At Risk Events Survival Std. Err\n')
for j in self.results[i]:
#add the results to the string
resultsString += (
'{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
print(resultsString)
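# Hedged usage sketch (editorial addition, not part of statsmodels): the
# intended call sequence for this class, assuming `data` is a 2-d numpy array
# and the three indices point at the time, group and censoring columns; the
# helper name and column indices are hypothetical.
def _demo_kaplan_meier(data):
    try:
        km = KAPLAN_MEIER(data, timesIn=0, groupIn=1, censoringIn=2)
    except RuntimeError:
        # __init__ above deliberately raises and points at survival2.py; the
        # historical call sequence was km.fit(), km.show_results(), km.plot().
        return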
| bsd-3-clause |
nchammas/spark | python/pyspark/sql/udf.py | 19 | 20044 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
import sys
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import StringType, DataType, StructType, _parse_datatype_string
from pyspark.sql.pandas.types import to_arrow_type
__all__ = ["UDFRegistration"]
def _wrap_function(sc, func, returnType):
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
def _create_udf(f, returnType, evalType, name=None, deterministic=True):
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(
f, returnType=returnType, name=name, evalType=evalType, deterministic=deterministic)
return udf_obj._wrapped()
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
Notes
-----
The constructor of this class is not supposed to be directly called.
Use :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
to create this instance.
"""
def __init__(self, func,
returnType=StringType(),
name=None,
evalType=PythonEvalType.SQL_BATCHED_UDF,
deterministic=True):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func)))
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid return type: returnType should be DataType or str "
"but is {}".format(returnType))
if not isinstance(evalType, int):
raise TypeError(
"Invalid evaluation type: evalType should be an int but is {}".format(evalType))
self.func = func
self._returnType = returnType
# Stores UserDefinedPythonFunctions jobj, once initialized
self._returnType_placeholder = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, '__name__')
else func.__class__.__name__)
self.evalType = evalType
self.deterministic = deterministic
@property
def returnType(self):
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or \
self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with scalar Pandas UDFs: %s is "
"not supported" % str(self._returnType_placeholder))
elif self.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped map Pandas UDFs or "
"at groupby.applyInPandas: %s is not supported" % str(
self._returnType_placeholder))
else:
raise TypeError("Invalid return type for grouped map Pandas "
"UDFs or at groupby.applyInPandas: return type must be a "
"StructType.")
elif self.evalType == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in mapInPandas: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid return type in mapInPandas: "
"return type must be a StructType.")
elif self.evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type in cogroup.applyInPandas: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid return type in cogroup.applyInPandas: "
"return type must be a StructType.")
elif self.evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
try:
# StructType is not yet allowed as a return type, explicitly check here to fail fast
if isinstance(self._returnType_placeholder, StructType):
raise TypeError
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid return type with grouped aggregate Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
return self._returnType_placeholder
@property
def _judf(self):
        # It is possible that concurrent access to a newly created UDF
        # will initialize multiple UserDefinedPythonFunctions.
# This is unlikely, doesn't affect correctness,
# and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction(
self._name, wrapped_func, jdt, self.evalType, self.deterministic)
return judf
def __call__(self, *cols):
judf = self._judf
sc = SparkContext._active_spark_context
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
# This function is for improving the online help system in the interactive interpreter.
# For example, the built-in help / pydoc.help. It wraps the UDF with the docstring and
# argument annotation. (See: SPARK-19161)
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
wrapper._unwrapped = self
return wrapper
def asNondeterministic(self):
"""
Updates UserDefinedFunction to nondeterministic.
.. versionadded:: 2.3
"""
# Here, we explicitly clean the cache to create a JVM UDF instance
# with 'deterministic' updated. See SPARK-23233.
self._judf_placeholder = None
self.deterministic = False
return self
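# Hedged usage sketch (editorial addition, not part of pyspark):
# UserDefinedFunction is normally created through pyspark.sql.functions.udf,
# which funnels into _create_udf above.  `spark` is an assumed active
# SparkSession; the helper name and sample data are hypothetical.
def _demo_udf(spark):
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType
    slen = udf(lambda s: len(s), IntegerType())  # row-at-a-time Python UDF
    df = spark.createDataFrame([("spark",), ("udf",)], ["word"])
    return df.select(slen("word").alias("n")).collect()  # [Row(n=5), Row(n=3)]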
class UDFRegistration(object):
"""
Wrapper for user-defined function registration. This instance can be accessed by
:attr:`spark.udf` or :attr:`sqlContext.udf`.
.. versionadded:: 1.3.1
"""
def __init__(self, sparkSession):
self.sparkSession = sparkSession
def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
.. versionadded:: 1.3.1
Parameters
----------
        name : str
name of the user-defined function in SQL statements.
f : function, :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see the examples below.
Returns
-------
function
a user-defined function
Notes
-----
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
Examples
--------
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)='4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)='3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function (from Spark 2.3.0):
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf
>>> @pandas_udf("integer") # doctest: +SKIP
... def add_one(s: pd.Series) -> pd.Series:
... return s + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer") # doctest: +SKIP
... def sum_udf(v: pd.Series) -> int:
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
"""
# This is to check whether the input function is from a user-defined function or
# Python function.
if hasattr(f, 'asNondeterministic'):
if returnType is not None:
raise TypeError(
"Invalid return type: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType)
if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF]:
raise ValueError(
"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF, "
"SQL_SCALAR_PANDAS_ITER_UDF, SQL_GROUPED_AGG_PANDAS_UDF or "
"SQL_MAP_PANDAS_ITER_UDF.")
register_udf = _create_udf(
f.func, returnType=f.returnType, name=name,
evalType=f.evalType, deterministic=f.deterministic)._unwrapped
return_udf = f
else:
if returnType is None:
returnType = StringType()
return_udf = _create_udf(
f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF, name=name)
register_udf = return_udf._unwrapped
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
.. versionadded:: 2.3.0
Parameters
----------
name : str
name of the user-defined function
javaClassName : str
fully qualified name of java class
returnType : :class:`pyspark.sql.types.DataType` or str, optional
the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength('test')").collect() # doctest: +SKIP
[Row(javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength2('test')").collect() # doctest: +SKIP
[Row(javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
... # doctest: +SKIP
>>> spark.sql("SELECT javaStringLength3('test')").collect() # doctest: +SKIP
[Row(javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
.. versionadded:: 2.3.0
        Parameters
        ----------
        name : str
name of the user-defined aggregate function
javaClassName : str
fully qualified name of java class
Examples
--------
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
... # doctest: +SKIP
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> q = "SELECT name, javaUDAF(id) as avg from df group by name order by name desc"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(name='b', avg=102.0), Row(name='a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.udf
globs = pyspark.sql.udf.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.udf tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.udf, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
luo66/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
x75/smq | smq/logging.py | 1 | 3866 | """logging lib for smq"""
# please satisfy all logging needs for smq
# 0. fixed length
# 1. synchronous time (hm...)
# 2. numpy arrays
# 3. hdf5 / pytables
# 4. python logging?
# implementations in smp
# - smp/smpblocks/smpblock/blogging.py
# - BlockLogger (nay)
# - x v2 logger (yay)
# - paparazzi/sw/ground_segment/python/paramopt.py
import tables as tb
# declare global h5 file handle
h5file = 0
# is log inited?
# loginit = False
loginit = True
lognodes = {}
################################################################################
# raw tables logging
def init_log2(config):
"""second attempt at log init function: called from Blockspy3 init, triggered by topblock arg of Blockspy3"""
global h5file, loginit, lognodes
# ts = datetime.datetime.now().strftime("%Y%m%d-%H%M%S-%f")
# experiment_name = config.keys()[0]
experiment = "%s" % (config["id"]) # , ts)
tblfile = "data/%s.h5" % (experiment)
h5file = tb.open_file(tblfile, mode = "w", title = "%s" % (experiment))
root = h5file.root
storage_version = "v2"
# create VLArray for storing the graph configuration
conf_array = h5file.create_vlarray(root, 'conf', tb.VLStringAtom(),
"Variable Length Config String")
conf_array.append(str(config))
# FIXME: log git commit
# create topblock array
# a = tb.Float64Atom()
# lognodes[parent.id] = h5file.create_earray(root, "%s_obuf" % (parent.id), a, (parent.odim, 0))
# # create arrays for each node's data
# for nodek, nodev in nodes.items():
# print("init_log: node", nodek)
# a = tb.Float64Atom()
# # node_fieldkeys =
# # for node_fieldkey in ["%s_%03d" % (nodek, i) for i in range(nodev.odim)]:
# # tdef[node_fieldkey] = tb.Float32Col()
# # tdef[nodek] = tb.Float32Col(shape=(nodev.odim, 1))
# # return tdef
# lognodes[nodev.id] = h5file.create_earray(root, "%s_obuf" % (nodev.id), a, (nodev.odim, 0))
# loginit = True # this doesn't work yet persistently, why?
loginit = True
# print("initlog done")
def init_log2_block(tbl_name, tbl_dim, tbl_columns = None):
# print("%s.init_log2_block" % ("logging"), tbl_name, tbl_dim)
global loginit, h5file, lognodes
if loginit:
a = tb.Float64Atom()
lognodes[tbl_name] = h5file.create_earray(h5file.root, "item_%s_data" % (tbl_name), a, (tbl_dim, 0))
def log(nodeid, data):
"""Global logging method, like in python logging: take data, write into corresponding array"""
# FIXME: make a dummy log method an overwrite it on loginit with the real function body?
# print("id: %s, data: %s" % (id, str(data)[:60]))
# print("log")
# print("lognode", lognodes[nodeid])
# print("data shape", data.shape)
lognodes[nodeid].append(data)
################################################################################
# pandas based logging
import pandas as pd
log_store = 0
log_lognodes = {}
log_lognodes_idx = {}
def init_log3(config):
global log_store
experiment = "%s" % (config["id"])
log_store = pd.HDFStore("data/%s_pd.h5" % (experiment))
def init_log3_block(tbl_name, tbl_dim, tbl_columns = None, numsteps=100):
global log_store, log_lognodes
# print "logging.init_log3_block: adding %s to log_lognodes with columns %s" % (tbl_name, tbl_columns)
log_lognodes[tbl_name] = pd.DataFrame(columns=tbl_columns, index = range(numsteps), dtype=float)
log_lognodes_idx[tbl_name] = 0
def log3(nodeid, data):
global log_lognodes, log_lognodes_idx
# print "data.shape", data.flatten().shape, log_lognodes_idx[nodeid]
log_lognodes[nodeid].loc[log_lognodes_idx[nodeid]] = data.flatten()
# log_lognodes[nodeid].loc[0] = 1
# print "log_lognodes[nodeid]", log_lognodes[nodeid].loc[0]
log_lognodes_idx[nodeid] += 1
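# Hedged usage sketch (editorial addition, not part of smq): the intended call
# order for the pandas-backed logger above, assuming a data/ directory exists.
# The experiment id, block name, columns and final store write are hypothetical;
# the excerpt shows no explicit save call, so persisting is my own addition.
def _demo_log3():
    import numpy as np
    init_log3({"id": "demo_experiment"})  # opens data/demo_experiment_pd.h5
    init_log3_block("b1", 2, tbl_columns=["x", "y"], numsteps=10)
    for i in range(10):
        log3("b1", np.array([[i, 0.5 * i]]))  # one 1x2 row per step
    log_store["b1"] = log_lognodes["b1"]  # persist the DataFrame to the store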
| mit |
soft-matter/mr | doc/sphinxext/ipython_directive.py | 1 | 29953 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import ast
import cStringIO
import os
import re
import sys
import tempfile
# Third-party
import matplotlib
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i == N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:' % ''.join(['.'] * (len(str(lineno)) + 2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i < N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
# print("nextline=%s, continuation=%s, starts=%s"%(nextline,
# continuation, nextline.startswith(continuation)))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i += 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i < N - 1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
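# Hedged sketch (editorial addition, not part of the directive): block_parser
# applied to a two-line session using the default prompt regexes and formats
# documented in the module docstring; the helper name is hypothetical.
def _demo_block_parser():
    rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
    rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
    part = "In [1]: 1 + 1\nOut[1]: 2"
    # returns [(INPUT, (None, '1 + 1', '')), (OUTPUT, '2')]
    return block_parser(part, rgxin, rgxout, 'In [%d]:', 'Out [%d]:')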
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
config.InteractiveShell.cache_size = 0
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir, profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating
# InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
# from IPython.utils.io import Tee
# io.stdout = Tee(self.cout, channel='stdout') # dbg
# io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
# print("input='%s'"%self.input)
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir, filename),
source_dir)
imagerows = ['.. image:: %s' % outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s' % (arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
# print('INPUT:', data) # dbg
is_verbatim = decorator == '@verbatim' or self.is_verbatim
is_doctest = decorator == '@doctest' or self.is_doctest
is_suppress = decorator == '@suppress' or self.is_suppress
is_okexcept = decorator == '@okexcept' or self.is_okexcept
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
def _remove_first_space_if_any(line):
return line[1:] if line.startswith(' ') else line
input_lines = map(_remove_first_space_if_any, input.split('\n'))
self.datacontent = data
continuation = ' %s: ' % ''.join(['.'] * (len(str(lineno)) + 2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
store_history = True
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if is_semicolon or is_suppress:
store_history = False
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s' % (input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s%s' % (continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output.decode('utf-8'))
if not is_okexcept and "Traceback" in output:
sys.stdout.write(output)
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
# print('OUTPUT', output) # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind < 0:
e = 'output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found != submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted))
raise RuntimeError(e)
# print('''doctest PASSED for input_lines="%s" with
# found_output="%s" and submitted output="%s"''' % (input_lines,
# found, submitted))
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
# print('SAVEFIG', command) # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
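    # Illustrative sketch (structure inferred from the loop above, not from
    # block_parser's documentation): ``block`` is a sequence of (token, data)
    # pairs, e.g.
    #   [(COMMENT, '# setup'),
    #    (INPUT, ('@savefig plot.png width=4in', 'plot([1, 2, 3])', '')),
    #    (OUTPUT, 'Out[1]: [<matplotlib.lines.Line2D ...>]')]
    # and process_block() returns (processed_lines, image_directive_or_None).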
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
fmtin = self.promptin
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line) # preserve empty lines in output
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with multilines
if not multiline: # not currently on a multiline
if line_stripped.endswith('\\'): # now we are
multiline = True
cont_len = len(str(lineno)) + 2
line_to_process = line.strip('\\')
output.extend([u"%s %s" % (fmtin % lineno, line)])
continue
else: # no we're still not
line_to_process = line.strip('\\')
else: # we are currently on a multiline
line_to_process += line.strip('\\')
if line_stripped.endswith('\\'): # and we still are
continuation = '.' * cont_len
output.extend(
[(u' %s: ' + line_stripped) % continuation])
continue
# else go ahead and run this multiline then carry on
# get output of line
self.process_input_line(unicode(line_to_process.strip()),
store_history=False)
out_line = self.cout.getvalue()
self.clear_cout()
# clear current figure if plotted
if savefig:
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
# line numbers don't actually matter, they're replaced later
if not multiline:
in_line = u"%s %s" % (fmtin % lineno, line)
output.extend([in_line])
else:
output.extend([(u' %s: ' + line_stripped) % continuation])
multiline = False
if len(out_line):
output.extend([out_line])
output.extend([u''])
return output
def process_pure_python2(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
# nuke empty lines
content = [line for line in content if len(line.strip()) > 0]
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
continuation = u' %s:' % ''.join(['.'] * (len(str(ct)) + 2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception:
multiline = True
multiline_start = lineno
else:
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
ast.parse('\n'.join(content[multiline_start:lineno + 1]))
if (lineno < len(content) - 1 and
_count_indent(content[multiline_start]) <
_count_indent(content[lineno + 1])):
continue
output.extend([continuation, u''])
multiline = False
except Exception:
pass
continue
return output
def _count_indent(x):
import re
m = re.match('(\s+)(.*)', x)
if not m:
return 0
return len(m.group(1))
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = {'python': directives.unchanged,
'suppress': directives.flag,
'verbatim': directives.flag,
'doctest': directives.flag,
'okexcept': directives.flag,
}
shell = EmbeddedSphinxShell()
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.seen_docs.add(self.state.document.current_source)
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s' % savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
# TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.current_content = self.content
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python2(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
# hack
# if row == '':
# continue
# lines.extend([' %s'% row.strip()])
lines.extend([' %s' % line
for line in re.split('[\n]+', row)])
if figure is not None:
figures.append(figure)
# text = '\n'.join(lines)
# figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
# print(lines)
if len(lines) > 2:
if debug:
print('\n'.join(lines))
else: # NOTE: this raises some errors, what's it for?
# print('INSERTING %d lines' % len(lines))
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
# imgnode = nodes.image(figs)
# cleanup
self.teardown()
return [] # , imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print(url.split('&'))
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print(x)
jdh
In [109]: for i in range(10):
.....: print(i)
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
# ipython_directive.DEBUG = True # dbg
# options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__ == '__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| gpl-3.0 |
SALib/SALib | src/SALib/analyze/sobol.py | 1 | 12989 | from __future__ import division
from __future__ import print_function
from scipy.stats import norm
import numpy as np
import pandas as pd
from . import common_args
from ..util import read_param_file, compute_groups_matrix, ResultDict
from types import MethodType
from multiprocessing import Pool, cpu_count
from functools import partial
from itertools import combinations, zip_longest
def analyze(problem, Y, calc_second_order=True, num_resamples=100,
conf_level=0.95, print_to_console=False, parallel=False,
n_processors=None, seed=None):
"""Perform Sobol Analysis on model outputs.
Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where
each entry is a list of size D (the number of parameters) containing the
indices in the same order as the parameter file. If calc_second_order is
True, the dictionary also contains keys 'S2' and 'S2_conf'.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
calc_second_order : bool
Calculate second-order sensitivities (default True)
num_resamples : int
The number of resamples (default 100)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
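    parallel : bool
        Perform the sensitivity calculations in parallel using
        multiprocessing (default False)
    n_processors : int
        Number of worker processes to use when parallel is True
        (default: chosen automatically)
    seed : int
        Seed for NumPy's random number generator (optional)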
References
----------
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
Examples
--------
>>> X = saltelli.sample(problem, 512)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
"""
if seed:
np.random.seed(seed)
# determining if groups are defined and adjusting the number
# of rows in the cross-sampled matrix accordingly
if not problem.get('groups'):
D = problem['num_vars']
else:
D = len(set(problem['groups']))
if calc_second_order and Y.size % (2 * D + 2) == 0:
N = int(Y.size / (2 * D + 2))
elif not calc_second_order and Y.size % (D + 2) == 0:
N = int(Y.size / (D + 2))
else:
raise RuntimeError("""
Incorrect number of samples in model output file.
Confirm that calc_second_order matches option used during sampling.""")
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
# normalize the model output
Y = (Y - Y.mean()) / Y.std()
A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)
r = np.random.randint(N, size=(N, num_resamples))
Z = norm.ppf(0.5 + conf_level / 2)
if not parallel:
S = create_Si_dict(D, calc_second_order)
for j in range(D):
S['S1'][j] = first_order(A, AB[:, j], B)
S['S1_conf'][j] = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
S['ST'][j] = total_order(A, AB[:, j], B)
S['ST_conf'][j] = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
# Second order (+conf.)
if calc_second_order:
for j in range(D):
for k in range(j + 1, D):
S['S2'][j, k] = second_order(
A, AB[:, j], AB[:, k], BA[:, j], B)
S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],
AB[r, k], BA[r, j],
B[r]).std(ddof=1)
else:
tasks, n_processors = create_task_list(
D, calc_second_order, n_processors)
func = partial(sobol_parallel, Z, A, AB, BA, B, r)
pool = Pool(n_processors)
S_list = pool.map_async(func, tasks)
pool.close()
pool.join()
S = Si_list_to_dict(S_list.get(), D, calc_second_order)
# Print results to console
if print_to_console:
print_indices(S, problem, calc_second_order)
# Add problem context and override conversion method for special case
S.problem = problem
S.to_df = MethodType(to_df, S)
return S
def first_order(A, AB, B):
# First order estimator following Saltelli et al. 2010 CPC, normalized by
# sample variance
return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)
def total_order(A, AB, B):
# Total order estimator following Saltelli et al. 2010 CPC, normalized by
# sample variance
return 0.5 * np.mean((A - AB) ** 2, axis=0) / np.var(np.r_[A, B], axis=0)
def second_order(A, ABj, ABk, BAj, B):
# Second order estimator following Saltelli 2002
Vjk = np.mean(BAj * ABk - A * B, axis=0) / np.var(np.r_[A, B], axis=0)
Sj = first_order(A, ABj, B)
Sk = first_order(A, ABk, B)
return Vjk - Sj - Sk
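# A minimal, illustrative sketch (not part of the SALib API; the helper name
# and toy values below are invented): the three estimators above operate on
# matched vectors of model outputs f(A), f(B) and f(AB_j), where AB_j is
# assumed to be sample matrix A with its j-th column taken from B.
def _estimator_usage_sketch():
    rng = np.random.RandomState(42)
    f_A = rng.rand(1000)    # model evaluated on sample matrix A
    f_B = rng.rand(1000)    # model evaluated on sample matrix B
    f_ABj = rng.rand(1000)  # model evaluated on AB_j (placeholder values only)
    s1_j = first_order(f_A, f_ABj, f_B)   # first-order index of parameter j
    st_j = total_order(f_A, f_ABj, f_B)   # total-order index of parameter j
    return s1_j, st_j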
def create_Si_dict(D, calc_second_order):
# initialize empty dict to store sensitivity indices
S = ResultDict((k, np.zeros(D))
for k in ('S1', 'S1_conf', 'ST', 'ST_conf'))
if calc_second_order:
S['S2'] = np.zeros((D, D))
S['S2'][:] = np.nan
S['S2_conf'] = np.zeros((D, D))
S['S2_conf'][:] = np.nan
return S
def separate_output_values(Y, D, N, calc_second_order):
AB = np.zeros((N, D))
BA = np.zeros((N, D)) if calc_second_order else None
step = 2 * D + 2 if calc_second_order else D + 2
A = Y[0:Y.size:step]
B = Y[(step - 1):Y.size:step]
for j in range(D):
AB[:, j] = Y[(j + 1):Y.size:step]
if calc_second_order:
BA[:, j] = Y[(j + 1 + D):Y.size:step]
return A, B, AB, BA
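# Sampling layout assumed by separate_output_values (with calc_second_order
# set to True and D parameters): Y is stored in blocks of 2 * D + 2 rows per
# base sample, ordered as
#   [ f(A), f(AB_1), ..., f(AB_D), f(BA_1), ..., f(BA_D), f(B) ]
# so A is every ``step``-th value starting at offset 0, B starts at
# ``step - 1``, and AB_j / BA_j start at offsets j + 1 and j + 1 + D.
# Without second-order indices the BA block is absent and ``step`` is D + 2.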
def sobol_parallel(Z, A, AB, BA, B, r, tasks):
sobol_indices = []
for d, j, k in tasks:
if d == 'S1':
s = first_order(A, AB[:, j], B)
elif d == 'S1_conf':
s = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'ST':
s = total_order(A, AB[:, j], B)
elif d == 'ST_conf':
s = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'S2':
s = second_order(A, AB[:, j], AB[:, k], BA[:, j], B)
elif d == 'S2_conf':
s = Z * second_order(A[r], AB[r, j], AB[r, k],
BA[r, j], B[r]).std(ddof=1)
sobol_indices.append([d, j, k, s])
return sobol_indices
def create_task_list(D, calc_second_order, n_processors):
# Create list with one entry (key, parameter 1, parameter 2) per sobol
# index (+conf.). This is used to supply parallel tasks to
# multiprocessing.Pool
tasks_first_order = [[d, j, None] for j in range(
D) for d in ('S1', 'S1_conf', 'ST', 'ST_conf')]
# Add second order (+conf.) to tasks
tasks_second_order = []
if calc_second_order:
tasks_second_order = [[d, j, k] for j in range(D) for k in
range(j + 1, D) for d in ('S2', 'S2_conf')]
if n_processors is None:
n_processors = min(cpu_count(), len(
tasks_first_order) + len(tasks_second_order))
if not calc_second_order:
tasks = np.array_split(tasks_first_order, n_processors)
else:
# merges both lists alternating its elements and splits the
# resulting lists into n_processors sublists
tasks = np.array_split([v for v in sum(
zip_longest(tasks_first_order[::-1], tasks_second_order), ())
if v is not None], n_processors)
return tasks, n_processors
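# For example (illustrative, D=2 with second-order indices): tasks_first_order
# holds entries such as ['S1', 0, None], ['S1_conf', 0, None], ...,
# ['ST_conf', 1, None], and tasks_second_order holds ['S2', 0, 1] and
# ['S2_conf', 0, 1]. Each entry names the index to compute and the parameter
# column(s) it applies to; the merged list is split into ``n_processors``
# roughly equal chunks for multiprocessing.Pool.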
def Si_list_to_dict(S_list, D, calc_second_order):
# Convert the parallel output into the regular dict format for
# printing/returning
S = create_Si_dict(D, calc_second_order)
L = []
for l in S_list: # first reformat to flatten
L += l
for s in L: # First order (+conf.)
if s[2] is None:
S[s[0]][s[1]] = s[3]
else:
S[s[0]][s[1], s[2]] = s[3]
return S
def Si_to_pandas_dict(S_dict):
"""Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of
(None, None)
Examples
--------
>>> X = saltelli.sample(problem, 512)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
"""
problem = S_dict.problem
total_order = {
'ST': S_dict['ST'],
'ST_conf': S_dict['ST_conf']
}
first_order = {
'S1': S_dict['S1'],
'S1_conf': S_dict['S1_conf']
}
idx = None
second_order = None
if 'S2' in S_dict:
if not problem.get('groups'):
names = problem['names']
else:
_, names = compute_groups_matrix(problem['groups'])
idx = list(combinations(names, 2))
second_order = {
'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
for i in idx],
'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]
for i in idx]
}
return total_order, first_order, (idx, second_order)
def to_df(self):
'''Conversion method to Pandas DataFrame. To be attached to ResultDict.
Returns
========
List : of Pandas DataFrames in order of Total, First, Second
'''
total, first, (idx, second) = Si_to_pandas_dict(self)
if not self.problem.get('groups'):
names = self.problem['names']
else:
_, names = compute_groups_matrix(self.problem['groups'])
ret = [pd.DataFrame(total, index=names),
pd.DataFrame(first, index=names)]
if second:
ret += [pd.DataFrame(second, index=idx)]
return ret
def print_indices(S, problem, calc_second_order):
# Output to console
if not problem.get('groups'):
title = 'Parameter'
names = problem['names']
D = problem['num_vars']
else:
title = 'Group'
_, names = compute_groups_matrix(problem['groups'])
D = len(names)
print('%s S1 S1_conf ST ST_conf' % title)
for j in range(D):
print('%s %f %f %f %f' % (names[j], S['S1'][
j], S['S1_conf'][j], S['ST'][j], S['ST_conf'][j]))
if calc_second_order:
print('\n%s_1 %s_2 S2 S2_conf' % (title, title))
for j in range(D):
for k in range(j + 1, D):
print("%s %s %f %f" % (names[j], names[k],
S['S2'][j, k], S['S2_conf'][j, k]))
def cli_parse(parser):
parser.add_argument('--max-order', type=int, required=False, default=2,
choices=[1, 2],
help='Maximum order of sensitivity indices to '
'calculate')
parser.add_argument('-r', '--resamples', type=int, required=False,
default=1000,
help='Number of bootstrap resamples for Sobol '
'confidence intervals')
parser.add_argument('--parallel', action='store_true', help='Makes '
'use of parallelization.',
dest='parallel')
parser.add_argument('--processors', type=int, required=False,
default=None,
help='Number of processors to be used with the ' +
'parallel option.', dest='n_processors')
return parser
def cli_action(args):
problem = read_param_file(args.paramfile)
Y = np.loadtxt(args.model_output_file, delimiter=args.delimiter,
usecols=(args.column,))
analyze(problem, Y, (args.max_order == 2),
num_resamples=args.resamples, print_to_console=True,
parallel=args.parallel, n_processors=args.n_processors,
seed=args.seed)
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
| mit |
Geosyntec/gisutils | gisutils/tests/test_algo.py | 2 | 1937 | from shapely import geometry
import pandas
import geopandas
import numpy
from affine import Affine
from gisutils import algo
import numpy.testing as nptest
import pandas.util.testing as pdtest
def test_average_slope():
_lines = [
geometry.LineString(coordinates=[(0, 5), (10, 5)]),
geometry.LineString(coordinates=[(5, 0), (5, 10)]),
geometry.LineString(coordinates=[(0, 0), (10, 10)]),
geometry.LineString(coordinates=[(0, 0), (0, 10), (10, 10)]),
geometry.LineString(coordinates=[(0, 0), (5, 5), (5, 0), (0, 0)]),
]
lines = geopandas.GeoDataFrame(geometry=_lines)
expected = pandas.Series([
1.000000,
0.000000,
0.707107,
0.500000,
0.000000,
]) * 100
hill = numpy.mgrid[:11, :11][1]
trans = Affine.translation(0, 10) * Affine.rotation(0) * Affine.scale(1, -1)
result = algo.average_slope(lines, hill, trans)
pdtest.assert_series_equal(result, expected)
def test_compute_sinuosity():
_lines = [
geometry.LineString(coordinates=[(0, 5), (10, 5)]),
geometry.LineString(coordinates=[(0, 0), (0, 10), (10, 10)]),
geometry.LineString(coordinates=[(0, 0), (5, 5), (5, 0), (0, 0)]),
]
lines = geopandas.GeoDataFrame(geometry=_lines)
expected = pandas.Series([
1.000000,
1.414213,
numpy.inf,
])
result = algo.compute_sinuosity(lines)
pdtest.assert_series_equal(result, expected)
def test_bearing_from_north():
gdf = geopandas.GeoDataFrame(
data=[1, 2, 3, 4, 5],
geometry=[
geometry.Point(0, 0),
geometry.Point(2, 2),
geometry.Point(4, 0),
geometry.Point(2, -2),
geometry.Point(0, 0)
]
)
result = algo.bearing_from_north(gdf)
expected = numpy.array((numpy.nan, 1, 3, 5, 7)) * numpy.pi / 4
nptest.assert_array_equal(result, expected)
| bsd-3-clause |
CDSFinance/zipline | zipline/transforms/batch_transform.py | 10 | 17792 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generator versions of transforms.
"""
import functools
import logbook
import numpy
from numbers import Integral
import pandas as pd
from six import (
string_types,
itervalues,
iteritems
)
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.data import MutableIndexRollingPanel
from zipline.protocol import Event
log = logbook.Logger('BatchTransform')
func_map = {'open_price': 'first',
'close_price': 'last',
'low': 'min',
'high': 'max',
'volume': 'sum'
}
def get_sample_func(item):
if item in func_map:
return func_map[item]
else:
return 'last'
def downsample_panel(minute_rp, daily_rp, mkt_close):
"""
@minute_rp is a rolling panel, which should have minutely rows
@daily_rp is a rolling panel, which should have daily rows
@dt is the timestamp to use when adding a frame to daily_rp
Using the history in minute_rp, a new daily bar is created by
downsampling. The data from the daily bar is then added to the
daily rolling panel using add_frame.
"""
cur_panel = minute_rp.get_current()
sids = minute_rp.minor_axis
day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)
env = get_algo_instance().trading_environment
dt1 = env.normalize_date(mkt_close)
dt2 = env.next_trading_day(mkt_close)
by_close = functools.partial(get_date, mkt_close, dt1, dt2)
for item in minute_rp.items:
frame = cur_panel[item]
func = get_sample_func(item)
# group by trading day, using the market close of the current
# day. If events occurred after the last close (yesterday) but
# before today's close, group them into today.
dframe = frame.groupby(lambda d: by_close(d)).agg(func)
for stock in sids:
day_frame[stock][item] = dframe[stock].ix[dt1]
# store the frame at midnight instead of the close
daily_rp.add_frame(dt1, day_frame)
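# Usage sketch: when downsampling is enabled, BatchTransform._append_to_window
# calls this once per market close, e.g.
#   downsample_panel(self.rolling_panel, self.daily_rolling_panel, mkt_close)
# so the daily panel gains one aggregated frame (first open, last close,
# min low, max high, summed volume) built from that day's minute bars.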
def get_date(mkt_close, d1, d2, d):
if d > mkt_close:
return d2
else:
return d1
class InvalidWindowLength(Exception):
"""
Error raised when the window length is unusable.
"""
pass
def check_window_length(window_length):
"""
Ensure the window length provided to a transform is valid.
"""
if window_length is None:
raise InvalidWindowLength("window_length must be provided")
if not isinstance(window_length, Integral):
raise InvalidWindowLength(
"window_length must be an integer-like number")
if window_length == 0:
raise InvalidWindowLength("window_length must be non-zero")
if window_length < 0:
raise InvalidWindowLength("window_length must be positive")
class BatchTransform(object):
"""Base class for batch transforms with a trailing window of
variable length. As opposed to pure EventWindows that get a stream
of events and are bound to a single SID, this class creates stream
    of pandas DataFrames with each column representing a sid.
There are two ways to create a new batch window:
(i) Inherit from BatchTransform and overload get_value(data).
E.g.:
```
class MyBatchTransform(BatchTransform):
def get_value(self, data):
# compute difference between the means of sid 0 and sid 1
return data[0].mean() - data[1].mean()
```
(ii) Use the batch_transform decorator.
E.g.:
```
@batch_transform
def my_batch_transform(data):
return data[0].mean() - data[1].mean()
```
In your algorithm you would then have to instantiate
this in the initialize() method:
```
self.my_batch_transform = MyBatchTransform()
```
To then use it, inside of the algorithm handle_data(), call the
handle_data() of the BatchTransform and pass it the current event:
```
result = self.my_batch_transform(data)
```
"""
def __init__(self,
func=None,
refresh_period=0,
window_length=None,
clean_nans=True,
sids=None,
fields=None,
compute_only_full=True,
bars='daily',
downsample=False):
"""Instantiate new batch_transform object.
:Arguments:
func : python function <optional>
If supplied will be called after each refresh_period
with the data panel and all args and kwargs supplied
to the handle_data() call.
refresh_period : int
Interval to wait between advances in the window.
window_length : int
How many days the trailing window should have.
clean_nans : bool <default=True>
Whether to (forward) fill in nans.
sids : list <optional>
Which sids to include in the moving window. If not
supplied sids will be extracted from incoming
events.
fields : list <optional>
Which fields to include in the moving window
(e.g. 'price'). If not supplied, fields will be
extracted from incoming events.
compute_only_full : bool <default=True>
Only call the user-defined function once the window is
full. Returns None if window is not full yet.
downsample : bool <default=False>
If true, downsample bars to daily bars. Otherwise, do nothing.
"""
if func is not None:
self.compute_transform_value = func
else:
self.compute_transform_value = self.get_value
self.clean_nans = clean_nans
self.compute_only_full = compute_only_full
# no need to down sample if the bars are already daily
self.downsample = downsample and (bars == 'minute')
# How many bars are in a day
self.bars = bars
if self.bars == 'daily':
self.bars_in_day = 1
elif self.bars == 'minute':
self.bars_in_day = int(6.5 * 60)
else:
raise ValueError('%s bars not understood.' % self.bars)
# The following logic is to allow pre-specified sid filters
# to operate on the data, but to also allow new symbols to
# enter the batch transform's window IFF a sid filter is not
# specified.
if sids is not None:
if isinstance(sids, (string_types, Integral)):
self.static_sids = set([sids])
else:
self.static_sids = set(sids)
else:
self.static_sids = None
self.initial_field_names = fields
if isinstance(self.initial_field_names, string_types):
self.initial_field_names = [self.initial_field_names]
self.field_names = set()
self.refresh_period = refresh_period
check_window_length(window_length)
self.window_length = window_length
self.trading_days_total = 0
self.window = None
self.full = False
# Set to -inf essentially to cause update on first attempt.
self.last_dt = pd.Timestamp('1900-1-1', tz='UTC')
self.updated = False
self.cached = None
self.last_args = None
self.last_kwargs = None
# Data panel that provides bar information to fill in the window,
# when no bar ticks are available from the data source generator
# Used in universes that 'rollover', e.g. one that has a different
# set of stocks per quarter
self.supplemental_data = None
self.rolling_panel = None
self.daily_rolling_panel = None
def handle_data(self, data, *args, **kwargs):
"""
Point of entry. Process an event frame.
"""
# extract dates
dts = [event.dt for event in itervalues(data._data)]
# we have to provide the event with a dt. This is only for
# checking if the event is outside the window or not so a
# couple of seconds shouldn't matter. We don't add it to
# the data parameter, because it would mix dt with the
# sid keys.
event = Event()
event.dt = max(dts)
event.data = {k: v.__dict__ for k, v in iteritems(data._data)
# Need to check if data has a 'length' to filter
# out sids without trade data available.
# TODO: expose more of 'no trade available'
# functionality to zipline
if len(v)}
# only modify the trailing window if this is
# a new event. This is intended to make handle_data
# idempotent.
if self.last_dt < event.dt:
self.updated = True
self._append_to_window(event)
else:
self.updated = False
# return newly computed or cached value
return self.get_transform_value(*args, **kwargs)
def _init_panels(self, sids):
if self.downsample:
self.rolling_panel = MutableIndexRollingPanel(
self.bars_in_day,
self.field_names,
sids,
)
self.daily_rolling_panel = MutableIndexRollingPanel(
self.window_length,
self.field_names,
sids,
)
else:
self.rolling_panel = MutableIndexRollingPanel(
self.window_length * self.bars_in_day,
self.field_names,
sids,
)
def _append_to_window(self, event):
self.field_names = self._get_field_names(event)
if self.static_sids is None:
sids = set(event.data.keys())
else:
sids = self.static_sids
# the panel sent to the transform code will have
# columns masked with this set of sids. This is how
# we guarantee that all (and only) the sids sent to the
# algorithm's handle_data and passed to the batch
# transform. See the get_data method to see it applied.
# N.B. that the underlying panel grows monotonically
# if the set of sids changes over time.
self.latest_sids = sids
        # Create rolling panel if it does not exist yet
if self.rolling_panel is None:
self._init_panels(sids)
# Store event in rolling frame
self.rolling_panel.add_frame(event.dt,
pd.DataFrame(event.data,
index=self.field_names,
columns=sids))
# update trading day counters
        # we may get events from non-trading sources which occur on
# non-trading days. The book-keeping for market close and
# trading day counting should only consider trading days.
env = get_algo_instance().trading_environment
if env.is_trading_day(event.dt):
_, mkt_close = env.get_open_and_close(event.dt)
if self.bars == 'daily':
# Daily bars have their dt set to midnight.
mkt_close = env.normalize_date(mkt_close)
if event.dt == mkt_close:
if self.downsample:
downsample_panel(self.rolling_panel,
self.daily_rolling_panel,
mkt_close
)
self.trading_days_total += 1
self.mkt_close = mkt_close
self.last_dt = event.dt
if self.trading_days_total >= self.window_length:
self.full = True
def get_transform_value(self, *args, **kwargs):
"""Call user-defined batch-transform function passing all
arguments.
Note that this will only call the transform if the datapanel
has actually been updated. Otherwise, the previously, cached
value will be returned.
"""
if self.compute_only_full and not self.full:
return None
#################################################
# Determine whether we should call the transform
# 0. Support historical/legacy usage of '0' signaling,
# 'update on every bar'
if self.refresh_period == 0:
period_signals_update = True
else:
# 1. Is the refresh period over?
period_signals_update = (
self.trading_days_total % self.refresh_period == 0)
# 2. Have the args or kwargs been changed since last time?
args_updated = args != self.last_args or kwargs != self.last_kwargs
# 3. Is this a downsampled batch, and is the last event mkt close?
downsample_ready = not self.downsample or \
self.last_dt == self.mkt_close
recalculate_needed = downsample_ready and \
(args_updated or (period_signals_update and self.updated))
###################################################
if recalculate_needed:
self.cached = self.compute_transform_value(
self.get_data(),
*args,
**kwargs
)
self.last_args = args
self.last_kwargs = kwargs
return self.cached
def get_data(self):
"""Create a pandas.Panel (i.e. 3d DataFrame) from the
events in the current window.
Returns:
The resulting panel looks like this:
index : field_name (e.g. price)
major axis/rows : dt
            minor axis/columns : sid
"""
if self.downsample:
data = self.daily_rolling_panel.get_current()
else:
data = self.rolling_panel.get_current()
if self.supplemental_data is not None:
for item in data.items:
if item not in self.supplemental_data.items:
continue
for dt in data.major_axis:
try:
supplemental_for_dt = self.supplemental_data.ix[
item, dt, :]
except KeyError:
# Only filling in data available in supplemental data.
supplemental_for_dt = None
if supplemental_for_dt is not None:
data[item].ix[dt] = \
supplemental_for_dt.combine_first(
data[item].ix[dt])
# screen out sids no longer in the multiverse
data = data.ix[:, :, self.latest_sids]
if self.clean_nans:
# Fills in gaps of missing data during transform
# of multiple stocks. E.g. we may be missing
# minute data because of illiquidity of one stock
data = data.fillna(method='ffill')
# Hold on to a reference to the data,
# so that it's easier to find the current data when stepping
# through with a debugger
self._curr_data = data
return data
def get_value(self, *args, **kwargs):
raise NotImplementedError(
"Either overwrite get_value or provide a func argument.")
def __call__(self, f):
self.compute_transform_value = f
return self.handle_data
def _extract_field_names(self, event):
# extract field names from sids (price, volume etc), make sure
# every sid has the same fields.
sid_keys = []
for sid in itervalues(event.data):
keys = set([name for name, value in sid.items()
if isinstance(value,
(int,
float,
numpy.integer,
numpy.float,
numpy.long))
])
sid_keys.append(keys)
# with CUSTOM data events, there may be different fields
# per sid. So the allowable keys are the union of all events.
union = set.union(*sid_keys)
unwanted_fields = {
'portfolio',
'sid',
'dt',
'type',
'source_id',
'_initial_len',
}
return union - unwanted_fields
def _get_field_names(self, event):
if self.initial_field_names is not None:
return self.initial_field_names
else:
self.latest_names = self._extract_field_names(event)
return set.union(self.field_names, self.latest_names)
def batch_transform(func):
"""Decorator function to use instead of inheriting from BatchTransform.
For an example on how to use this, see the doc string of BatchTransform.
"""
@functools.wraps(func)
def create_window(*args, **kwargs):
# passes the user defined function to BatchTransform which it
# will call instead of self.get_value()
return BatchTransform(*args, func=func, **kwargs)
return create_window
| apache-2.0 |
vermouthmjl/scikit-learn | sklearn/tests/test_common.py | 27 | 8389 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check, name, Estimator
else:
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
# of n_iter atleast 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check_transformer_n_iter, name, estimator
else:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# The ProjectedGradientNMF class is deprecated
if issubclass(Estimator, ProjectedGradientNMF):
with ignore_warnings():
yield check_get_params_invariance, name, Estimator
else:
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
hamedhsn/incubator-airflow | airflow/contrib/plugins/metastore_browser/main.py | 62 | 5773 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| apache-2.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/tests/test_report.py | 1 | 10209 | # Authors: Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
import warnings
import shutil
from nose.tools import assert_true, assert_equal, assert_raises
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import Raw
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report.
"""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = Raw(raw_fname_new)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section.
"""
from PIL import Image
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png and then gif
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
im = Image.open(img_fname)
    img_fname = op.join(tempdir, 'testimage.gif')
im.save(img_fname) # matplotlib does not support gif
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
assert_raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report.
"""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report.
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report.
"""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_true(html in html_compare)
def test_add_slider_to_section():
"""Test adding a slider with a series of images to mne report.
"""
tempdir = _TempDir()
from matplotlib import pyplot as plt
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = list()
figs.append(plt.figure())
plt.plot([1, 2, 3])
plt.close('all')
figs.append(plt.figure())
plt.plot([3, 2, 1])
plt.close('all')
report.add_slider_to_section(figs, section=section)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert_raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
assert_raises(ValueError, report.add_slider_to_section, figs, ['wug'])
assert_raises(TypeError, report.add_slider_to_section, figs, 'wug')
def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
| bsd-3-clause |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter07/ts_smoothing_alternative.py | 1 | 2029 | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# change the font size
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
matplotlib.rc('font', size=14)
def holt_transform(column, alpha):
'''
Method to apply Holt transform
The transform is given as
y(t) = alpha * x(t) + (1-alpha) y(t-1)
'''
# create an np.array from the column
original = np.array(column)
# starting point for the transformation
transformed = [original[0]]
# apply the transform to the rest of the data
for i in range(1, len(original)):
transformed.append(
original[i] * alpha +
(1-alpha) * transformed[-1])
return transformed
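# Illustrative sanity check of the transform above (added for clarity, not
# part of the original cookbook script): with alpha=0.5 each output is the
# average of the current observation and the previous smoothed value, e.g.
#   holt_transform([2, 4, 6], 0.5) -> [2, 3.0, 4.5]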
# time series tools
import statsmodels.api as sm
# folder with data
data_folder = '../../Data/Chapter07/'
# colors
colors = ['#FF6600', '#000000', '#29407C', '#660000']
# read the data
riverFlows = pd.read_csv(data_folder + 'combined_flow.csv',
index_col=0, parse_dates=[0])
# transformations
ma_transformHolt = riverFlows.apply(
lambda col: holt_transform(col, 0.5), axis=0)
difference = riverFlows - riverFlows.shift()
# plot the data
fig, ax = plt.subplots(2, 3, sharex=True)
# set the size of the figure explicitly
fig.set_size_inches(12, 7)
# plot the charts for american
ax[0, 0].plot(riverFlows['american_flow'], colors[0])
ax[0, 1].plot(ma_transformHolt['american_flow'],colors[1])
ax[0, 2].plot(difference['american_flow'],colors[2])
# plot the charts for columbia
ax[1, 0].plot(riverFlows['columbia_flow'], colors[0])
ax[1, 1].plot(ma_transformHolt['columbia_flow'],colors[1])
ax[1, 2].plot(difference['columbia_flow'],colors[2])
# set titles for columns
ax[0, 0].set_title('Original')
ax[0, 1].set_title('Holt transform')
ax[0, 2].set_title('Differencing')
# set titles for rows
ax[0, 0].set_ylabel('American')
ax[1, 0].set_ylabel('Columbia')
# save the chart
plt.savefig(data_folder + 'charts/holt_transform.png',
dpi=300)
| gpl-2.0 |
nvoron23/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
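    # For example (illustrative note, not part of the original test): two
    # equiprobable classes give entropy([0, 0, 1, 1]) == 1.0 bit.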
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
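    # (Added reasoning sketch: the first random cut is uniform on [0, 3], so
    # each unit interval has probability 1/3. A middle cut yields the compact
    # tree immediately (1/3), while an outer cut leaves a 3-valued side whose
    # next cut picks one of two shapes with probability 1/2 each, i.e.
    # 1/3 * 1/2 = 1/6 for each of the remaining four tree shapes.)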
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/series/methods/test_describe.py | 1 | 3301 | import numpy as np
from pandas import Period, Series, Timedelta, Timestamp, date_range
import pandas._testing as tm
class TestSeriesDescribe:
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name="int_data")
result = s.describe()
expected = Series(
[5, 2, s.std(), 0, 1, 2, 3, 4],
name="int_data",
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name="bool_data")
result = s.describe()
expected = Series(
[5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
s = Series(["a", "a", "b", "c", "d"], name="str_data")
result = s.describe()
expected = Series(
[5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
s = Series(
[
Timedelta("1 days"),
Timedelta("2 days"),
Timedelta("3 days"),
Timedelta("4 days"),
Timedelta("5 days"),
],
name="timedelta_data",
)
result = s.describe()
expected = Series(
[5, s[2], s.std(), s[0], s[1], s[2], s[3], s[4]],
name="timedelta_data",
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
s = Series(
[Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],
name="period_data",
)
result = s.describe()
expected = Series(
[3, 2, s[0], 2],
name="period_data",
index=["count", "unique", "top", "freq"],
)
tm.assert_series_equal(result, expected)
def test_describe_empty_object(self):
# https://github.com/pandas-dev/pandas/issues/27183
s = Series([None, None], dtype=object)
result = s.describe()
expected = Series(
[0, 0, np.nan, np.nan],
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_series_equal(result, expected)
result = s[:0].describe()
tm.assert_series_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2])
assert np.isnan(result.iloc[3])
def test_describe_with_tz(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
name = str(tz_naive_fixture)
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
result = s.describe()
expected = Series(
[
5,
Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
s[1],
s[2],
s[3],
end.tz_localize(tz),
],
name=name,
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
RPGOne/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
zorojean/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
flybird119/voltdb | tools/vis2.py | 4 | 14464 | #!/usr/bin/env python
# This is a visualizer which pulls TPC-C benchmark results from the MySQL
# databases and visualizes them. Four graphs will be generated, latency graph on
# single node and multiple nodes, and throughput graph on single node and
# multiple nodes.
#
# Run it without any arguments to see what arguments are needed.
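# Typical invocation (illustrative assumption; the directory and base name
# below are hypothetical -- see usage() further down for the authoritative
# argument list):
#   python vis2.py /var/www/perf-graphs nightly 30 2000 1000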
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
from operator import itemgetter, attrgetter
import numpy as np
STATS_SERVER = 'volt2'
COLORS = ['b','g','c','m','k']
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
mc = {}
def get_stats(hostname, port, days):
"""Get most recent run statistics of all apps within the last 'days'
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'AverageOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
maxdate = datetime.datetime(1970,1,1,0,0,0)
mindate = datetime.datetime(2038,1,19,0,0,0)
stats = dict()
run_stat_keys = ['app', 'nodes', 'branch', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
group = (row[0],row[1])
app_stats = []
maxdate = max(maxdate, row[3])
mindate = min(mindate, row[3])
if group not in stats:
stats[group] = app_stats
else:
app_stats = stats[group]
run_stats = dict(zip(run_stat_keys, row))
app_stats.append(run_stats)
return (stats, mindate, maxdate)
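# Shape of the returned data (illustrative assumption based on the query
# above; the app and branch names are hypothetical):
#   stats[('tpcc', 3)] -> [{'app': 'tpcc', 'nodes': 3, 'branch': 'master',
#                           'date': <datetime>, 'tps': ..., 'lat95': ...,
#                           'lat99': ...}, ...]
#   mindate / maxdate bracket the run dates seen across all groups.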
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h, xmin, xmax, series):
self.filename = filename
self.legends = {}
w = w == None and 2000 or w
h = h == None and 1000 or h
self.xmax = xmax
self.xmin = xmin
self.series = series
self.title = title
self.fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = self.fig.add_subplot(111)
self.ax.set_title(title)
plt.tick_params(axis='x', which='major', labelsize=16)
plt.tick_params(axis='y', labelright=True, labelleft=False, labelsize=16)
plt.Locator.MAXTICKS=2000
plt.grid(True)
self.fig.autofmt_xdate()
plt.ylabel(ylabel)
plt.xlabel(xlabel)
def plot(self, x, y, color, marker_shape, legend, linestyle):
self.ax.plot(x, y, linestyle, label=legend, color=color,
marker=marker_shape, markerfacecolor=color, markersize=8)
def close(self):
x_formatter = matplotlib.dates.DateFormatter("%b %d %y")
self.ax.xaxis.set_major_formatter(x_formatter)
xmin, xmax = plt.xlim()
if (self.xmax - self.xmin).days >= 365:
l = 13
loc = matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO, interval=13)
minloc = None
else:
l = 7
loc = matplotlib.dates.WeekdayLocator(byweekday=matplotlib.dates.MO, interval=1)
minloc = matplotlib.ticker.AutoMinorLocator(n=l)
if loc:
self.ax.xaxis.set_major_locator(loc)
if minloc:
self.ax.xaxis.set_minor_locator(minloc)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
self.ax.yaxis.set_major_formatter(y_formatter)
ymin, ymax = plt.ylim()
plt.xlim((self.xmin.toordinal(), (self.xmax+datetime.timedelta(1)).replace(minute=0, hour=0, second=0, microsecond=0).toordinal()))
if self.series.startswith('lat'):
lloc = 2
else:
lloc = 3
plt.legend(prop={'size': 10}, loc=lloc)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
plt.close('all')
def plot(title, xlabel, ylabel, filename, width, height, app, data, series, mindate, maxdate, polarity):
global mc
plot_data = dict()
for run in data:
if run['branch'] not in plot_data:
plot_data[run['branch']] = {series: []}
if series == 'tppn':
value = run['tps']/run['nodes']
else:
value = run[series]
datenum = matplotlib.dates.date2num(run['date'])
plot_data[run['branch']][series].append((datenum,value))
if len(plot_data) == 0:
return
pl = Plot(title, xlabel, ylabel, filename, width, height, mindate, maxdate, series)
flag = dict()
for b,bd in plot_data.items():
for k,v in bd.items():
if k not in flag.keys():
flag[k] = []
v = sorted(v, key=lambda x: x[0])
u = zip(*v)
if b not in mc:
mc[b] = (COLORS[len(mc.keys())%len(COLORS)], MARKERS[len(mc.keys())%len(MARKERS)])
pl.plot(u[0], u[1], mc[b][0], mc[b][1], b, '-')
ma = [None]
if b == 'master' and len(u[0]) >= 10:
(ma,mstd) = moving_average(u[1], 10)
pl.plot(u[0], ma, mc[b][0], None, None, ":")
failed = 0
if polarity==1:
cv = np.nanmin(ma)
rp = (u[0][np.nanargmin(ma)], cv)
if b == 'master' and ma[-1] > cv * 1.05:
failed = 1
else:
cv = np.nanmax(ma)
rp = (u[0][np.nanargmax(ma)], cv)
if b == 'master' and ma[-1] < cv * 0.95:
failed = 1
twosigma = np.sum([np.convolve(mstd, polarity*2), ma], axis=0)
pl.plot(u[0], twosigma, mc[b][0], None, None, '-.')
pl.ax.annotate(r"$2\sigma$", xy=(u[0][-1], twosigma[-1]), xycoords='data', xytext=(20,0), textcoords='offset points', ha='right')
twntypercent = np.sum([np.convolve(ma, polarity*0.2), ma], axis=0)
pl.plot(u[0], twntypercent, mc[b][0], None, None, '-.')
pl.ax.annotate(r"20%", xy=(u[0][-1], twntypercent[-1]), xycoords='data', xytext=(20,0), textcoords='offset points', ha='right')
p = (ma[-1]-rp[1])/rp[1]*100.
if failed != 0:
if p<10:
color = 'yellow'
else:
color = 'red'
flag[k].append((b, p))
for pos in ['top', 'bottom', 'right', 'left']:
pl.ax.spines[pos].set_edgecolor(color)
pl.ax.set_axis_bgcolor(color)
pl.ax.patch.set_alpha(0.1)
pl.ax.annotate("%.2f" % cv, xy=rp, xycoords='data', xytext=(0,-10*polarity),
textcoords='offset points', ha='center')
pl.ax.annotate("%.2f" % ma[-1], xy=(u[0][-1],ma[-1]), xycoords='data', xytext=(5,+5),
textcoords='offset points', ha='left')
pl.ax.annotate("(%+.2f%%)" % p, xy=(u[0][-1],ma[-1]), xycoords='data', xytext=(5,-5),
textcoords='offset points', ha='left')
"""
#pl.ax.annotate(b, xy=(u[0][-1],u[1][-1]), xycoords='data',
# xytext=(0, 0), textcoords='offset points') #, arrowprops=dict(arrowstyle="->"))
x = u[0][-1]
y = u[1][-1]
pl.ax.annotate(str(y), xy=(x,y), xycoords='data', xytext=(5,0),
textcoords='offset points', ha='left')
xmin, ymin = [(u[0][i],y) for i,y in enumerate(u[1]) if y == min(u[1])][-1]
xmax, ymax= [(u[0][i],y) for i,y in enumerate(u[1]) if y == max(u[1])][-1]
if ymax != ymin:
if xmax != x:
pl.ax.annotate(str(ymax), xy=(xmax,ymax),
textcoords='offset points', ha='center', va='bottom', xytext=(0,5))
if xmin != x:
pl.ax.annotate(str(ymin), xy=(xmin,ymin),
textcoords='offset points', ha='center', va='top', xytext=(0,-5))
"""
pl.close()
return flag
def generate_index_file(filenames):
row = """
<tr>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
</tr>
"""
sep = """
</table>
<table frame="box">
<tr>
<th colspan="3"><a name="%s">%s</a></th>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
Generated on %s
<table frame="box">
%s
</table>
</body>
</html>
"""
hrow = """
<tr>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
<td %s><a href=#%s>%s</a></td>
</tr>
"""
#h = map(lambda x:(x[0].replace(' ','%20'), x[0]), filenames)
h = []
for x in filenames:
tdattr = "<span></span>" #"bgcolor=green"
tdnote = ""
M = 0.0
if len(x) == 6:
for v in x[5].values():
if len(v) > 0:
M = max(M, abs(v[0][1]))
if M > 0.0:
tdattr = '<span style="color:yellow">►</span>'
if M > 10.0:
tdattr = '<span style="color:red">►</span>'
tdnote = " (by %.2f%%)" % M
h.append(("", x[0].replace(' ','%20'), tdattr + x[0] + tdnote))
n = 4
z = n-len(h)%n
while z > 0 and z < n:
h.append(('','',''))
z -= 1
rows = []
t = ()
for i in range(1, len(h)+1):
t += tuple(h[i-1])
if i%n == 0:
rows.append(hrow % t)
t = ()
last_app = None
for i in filenames:
if i[0] != last_app:
rows.append(sep % (i[0], i[0]))
last_app = i[0]
rows.append(row % (i[1], i[1], i[2], i[2], i[3], i[3]))
return full_content % (time.strftime("%Y/%m/%d %H:%M:%S"), ''.join(rows))
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n-1] = None
s = [float('NaN')]*(n-1)
for d in range(n, len(x)+1):
s.append(np.std(x[d-n:d]))
return (a,s)
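# Illustrative sketch (not part of the original script): how moving_average is
# consumed by the plotting code above.  The sample values and the helper name
# are made up; the point is the (average, running_std) return shape, where the
# first n-1 entries of the average are NaN padding.
def _moving_average_example():
    values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
    ma, mstd = moving_average(values, 3, type='simple')
    # ma[:2] is NaN padding; ma[2:] holds the 3-period simple averages [2.0, 3.0, 4.0, 5.0]
    # mstd is a list of the same length whose last entries hold the rolling std dev
    return ma, mstd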
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base [ndays]" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
ndays = 2000
if len(sys.argv) >=4:
ndays = int(sys.argv[3])
width = None
height = None
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
# show all the history
(stats, mindate, maxdate) = get_stats(STATS_SERVER, 21212, ndays)
mindate = (mindate).replace(hour=0, minute=0, second=0, microsecond=0)
maxdate = (maxdate + datetime.timedelta(days=1)).replace(minute=0, hour=0, second=0, microsecond=0)
root_path = path
filenames = [] # (appname, latency, throughput)
iorder = 0
for group, data in stats.iteritems():
(app,nodes) = group
app = app.replace('/','')
conn = FastSerializer(STATS_SERVER, 21212)
proc = VoltProcedure(conn, "@AdHoc", [FastSerializer.VOLTTYPE_STRING])
resp = proc.call(["select chart_order, series, chart_heading, x_label, y_label, polarity from charts where appname = '%s' order by chart_order" % app])
conn.close()
app = app +" %d %s" % (nodes, ["node","nodes"][nodes>1])
#chart polarity: -1 for tps (decreasing is bad), 1 for latencies (increasing is bad)
legend = { 1 : dict(series="lat95", heading="95tile latency", xlabel="Time", ylabel="Latency (ms)", polarity=1),
2 : dict(series="lat99", heading="99tile latency", xlabel="Time", ylabel="Latency (ms)", polarity=1),
3 : dict(series="tppn", heading="avg throughput per node", xlabel="Time", ylabel="ops/sec per node", polarity=-1)
}
for r in resp.tables[0].tuples:
legend[r[0]] = dict(series=r[1], heading=r[2], xlabel=r[3], ylabel=r[4], polarity=r[5])
fns = [app]
flags = dict()
for r in legend.itervalues():
title = app + " " + r['heading']
fn = "_" + title.replace(" ","_") + ".png"
fns.append(prefix + fn)
f = plot(title, r['xlabel'], r['ylabel'], path + fn, width, height, app, data, r['series'], mindate, maxdate, r['polarity'])
flags.update(f)
fns.append(iorder)
fns.append(flags)
filenames.append(tuple(fns))
filenames.append(("KVBenchmark-five9s-latency", "", "", "http://ci/job/performance-nextrelease-5nines/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
filenames.append(("KVBenchmark-five9s-nofail-latency", "", "", "http://ci/job/performance-nextrelease-5nines-nofail/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
filenames.append(("KVBenchmark-five9s-nofail-nocl-latency", "", "", "http://ci/job/performance-nextrelease-5nines-nofail-nocl/lastSuccessfulBuild/artifact/pro/tests/apptests/savedlogs/5nines-histograms.png", iorder))
# generate index file
index_file = open(root_path + '-index.html', 'w')
sorted_filenames = sorted(filenames, key=lambda f: f[0].lower()+str(f[1]))
index_file.write(generate_index_file(sorted_filenames))
index_file.close()
if __name__ == "__main__":
main()
| agpl-3.0 |
kudkudak/r2-learner | misc/utils.py | 2 | 9006 | """
This file contains utility functions
"""
import time
import os
import cPickle
import numpy
from config import logger, c
import pickle
import numpy as np
import glob
import mmap
import pandas as pd
from scipy import sparse
import matplotlib
matplotlib.use('Qt4Agg')
# This will be used when cache_ram=True is used
mem_storage = {}
def timed(func):
""" Decorator for easy time measurement """
def timed(*args, **dict_args):
tstart = time.time()
result = func(*args, **dict_args)
tend = time.time()
print(
"{0} ({1}, {2}) took {3:2.4f} s to execute".format(func.__name__, len(args), len(dict_args), tend - tstart))
return result
return timed
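# Illustrative sketch (not part of the original module): typical use of the
# timing decorator defined above; the decorated function is hypothetical.
@timed
def _slow_sum_example(n=1000000):
    return sum(xrange(n))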
cache_dict = {}
def cached_in_memory(func):
global cache_dict
def func_caching(*args, **dict_args):
key = (func.__name__, args, frozenset(dict_args.items()))
if key in cache_dict:
return cache_dict[key]
else:
returned_value = func(*args, **dict_args)
cache_dict[key] = returned_value
return returned_value
return func_caching
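# Illustrative sketch (not part of the original module): typical use of the
# in-memory memoization decorator above.  The decorated function is
# hypothetical; repeated calls with the same hashable arguments are computed
# once and then served from cache_dict.
@cached_in_memory
def _cached_square_example(x):
    return x * x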
from sklearn.externals import joblib
import os
def scikit_load(key):
dir = os.path.join(c["CACHE_DIR"], key)
file_name = os.path.join(os.path.join(c["CACHE_DIR"], dir), key + ".pkl")
return joblib.load(file_name)
def scikit_check(key):
dir = os.path.join(c["CACHE_DIR"], key)
return len(glob.glob(os.path.join(os.path.join(c["CACHE_DIR"], dir), key + ".pkl*"))) > 0
def scikit_save(key, val):
dir = os.path.join(c["CACHE_DIR"], key)
os.system("mkdir " + dir)
file_name = os.path.join(dir, key + ".pkl")
joblib.dump(val, file_name)
def scipy_csr_load(key):
file_name = os.path.join(c["CACHE_DIR"], key + ".npz")
f = np.load(file_name)
return sparse.csr_matrix((f["arr_0"], f["arr_1"], f["arr_2"]), shape=f["arr_3"])
def scipy_csr_check(key):
return os.path.exists(os.path.join(c["CACHE_DIR"], key + ".npz"))
def scipy_csr_save(key, val):
file_name = os.path.join(c["CACHE_DIR"], key)
np.savez(file_name, val.data, val.indices, val.indptr, val.shape)
def pandas_save_fnc(key, val):
file_name = os.path.join(c["CACHE_DIR"] + key + ".msg")
val.to_msgpack(file_name)
def pandas_check_fnc(key):
return os.path.exists(os.path.join(c["CACHE_DIR"] + key + ".msg"))
def pandas_load_fnc(key):
file_name = os.path.join(c["CACHE_DIR"] + key + ".msg")
return pd.read_msgpack(file_name)
def numpy_save_fnc(key, val):
if isinstance(val, tuple):
raise "Please use list to make numpy_save_fnc work"
# Note - not using savez because it is reportedly slow.
if isinstance(val, list):
logger.info("Saving as list")
save_path = os.path.join(c["CACHE_DIR"], key)
save_dict = {}
for id, ar in enumerate(val):
save_dict[str(id)] = ar
np.savez(save_path, **save_dict)
else:
logger.info("Saving as array " + str(val.shape))
np.save(os.path.join(c["CACHE_DIR"], key + ".npy"), val)
def numpy_check_fnc(key):
return len(glob.glob(os.path.join(c["CACHE_DIR"], key + ".np*"))) > 0
def numpy_load_fnc(key):
if os.path.exists(os.path.join(c["CACHE_DIR"], key + ".npz")):
# Listed numpy array
savez_file = np.load(os.path.join(c["CACHE_DIR"], key + ".npz"))
ar = []
for k in sorted(list((int(x) for x in savez_file))):
logger.info("Loading " + str(k) + " from " + str(key) + " " + str(savez_file[str(k)].shape))
ar.append(savez_file[str(k)])
return ar
else:
return np.load(os.path.join(c["CACHE_DIR"], key + ".npy"))
import hashlib
import sys
def generate_key(func_name, args, dict_args_original, skip_args):
args_concat = [v for key, v in sorted(dict_args_original.iteritems()) if key not in skip_args]
# Get the serialized arguments: the function name for callables, otherwise str(v)
# (hashed when long), skipping values that look like bare object references --
# checked, admittedly, in an ugly way
args_serialized = \
'_'.join([
v.__name__
if hasattr(v, '__call__')
else
(str(v) if len(str(v)) < 200 else hashlib.md5(str(v)).hexdigest())
for v in args_concat if hasattr(v, '__call__') or hasattr(v, "__init__") or str(v).find("0x") == -1])
logger.info("Serialized args to " + args_serialized)
key = func_name + "_" + args_serialized
full_key = func_name + "(" + ",".join(
    [str(k) + "=" + (str(v) if len(str(v)) < 200 else hashlib.md5(str(v)).hexdigest())
     for k, v in sorted(dict_args_original.iteritems()) if k not in skip_args]) + ")"
if len(key) > 400:
key = key[0:400]
return key, full_key
def cached_FS(save_fnc=None, load_fnc=None, check_fnc=None, skip_args=None, cache_ram=False, use_cPickle=False):
"""
To make it work correctly, please pass all parameters to the decorated function as keyword arguments (dict_args)
@param save_fnc, load_fnc function(key, returned_value)
@param check_fnc function(key) returning True/False
"""
if not skip_args:
skip_args = {}
def cached_HDD_inner(func):
def func_caching(*args, **dict_args):
"""
Pass use_mmap=True only for cached files that are large -- on the order of half the available memory or more
"""
if len(args) > 0:
raise Exception("For cached_FS functions pass all args by dict_args (ensures cache resolution)")
# ## Dump special arguments
dict_args_original = dict(dict_args)
dict_args_original.pop("use_mmap", None)
dict_args_original.pop("force_reload", None)
key, fullkey = generate_key(func.__name__, args, dict_args_original, skip_args)
# For retrieval
if not os.path.exists(os.path.join(c["CACHE_DIR"], "cache_dict.txt")):
with open(os.path.join(c["CACHE_DIR"], "cache_dict.txt"), "w") as f:
f.write("File containing hashes explanation for refernce\n ==== \n")
with open(os.path.join(c["CACHE_DIR"], "cache_dict.txt"), "a") as f:
f.write(key + "\n" + fullkey + "\n")
if cache_ram and key in mem_storage:
print("Reading from cache ram")
return mem_storage[key]
logger.info("Checking key " + key)
cache_file_default = os.path.join(c["CACHE_DIR"], str(key) + ".cache.pkl")
exists = os.path.exists(cache_file_default) if check_fnc is None else check_fnc(key)
def evaluate_and_write():
logger.info("Cache miss or force reload. Caching " + key)
returned_value = func(*args, **dict_args_original)
if save_fnc:
save_fnc(key, returned_value)
else:
with open(cache_file_default, "w") as f:
if use_cPickle:
cPickle.dump(returned_value, f)
else:
pickle.dump(returned_value, f)
if cache_ram:
mem_storage[key] = returned_value
return returned_value
if exists and not "force_reload" in dict_args:
logger.info("Loading (pickled?) file")
if load_fnc:
return load_fnc(key)
else:
# We do try here because we might have failed writing pickle file before
try:
with open(cache_file_default, "r") as f:
if "use_mmap" in dict_args:
g = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
obj = cPickle.load(g) if use_cPickle else pickle.load(g)
g.close()
return obj
else:
obj = cPickle.load(f) if use_cPickle else pickle.load(f)
return obj
except:
return evaluate_and_write()
else:
return evaluate_and_write()
return func_caching
return cached_HDD_inner
cached_FS_list_np = cached_FS(numpy_save_fnc, numpy_load_fnc, numpy_check_fnc)
if __name__ == "__main__":
@timed
@cached_FS()
def check_pickle(k=10, d=20, single=False):
x = np.ones(shape=(3000, 1000))
y = np.ones(shape=(10, 10))
return [x, y] if not single else x
@timed
@cached_FS(save_fnc=numpy_save_fnc, load_fnc=numpy_load_fnc, check_fnc=numpy_check_fnc)
def check_np(k=10, d=20, single=False):
x = np.ones(shape=(3000, 1000))
y = np.ones(shape=(10, 10))
return [x, y] if not single else x
print(check_pickle(single=False)[0].shape)
print(check_np(single=False)[0].shape)
"""
results: check_pickle - file 84kb, 5.37 to save 0.6351 to load
check_np - file 23kB, 0.69 to save, 0.0849 to load
"""
| mit |
PanDAWMS/panda-cacheschedconfig | cacheschedconfig/CacheSchedConfig.py | 1 | 7983 | #
# Dump schedconfig on a per-queue basis into cache files
#
# DB Connection
from cacheschedconfig.OraDBProxy2 import NewDBProxy as DBProxy
from copy import deepcopy
from pandaserver.config import panda_config
import sys
import os
import shutil
import json
class cacheSchedConfig:
'''
Class to dump schedconfig on a per-queue basis into cache files
'''
def __init__(self):
self.proxyS = None
self.queueData = None
self.cloudStatus = None
# Define this here, but could be more flexible...
self.queueDataFields = {
# Note that json dumps always use sort_keys=True; for pilot format
# the order defined here is respected
'pilot' : ['appdir', 'allowdirectaccess', 'cloud', 'datadir', 'dq2url', 'copytool', 'copytoolin',
'copysetup', 'copysetupin', 'ddm', 'se', 'sepath', 'seprodpath', 'envsetup', 'envsetupin',
'region', 'copyprefix', 'copyprefixin', 'lfcpath', 'lfcprodpath', 'lfchost', 'lfcregister',
'sein', 'wntmpdir', 'proxy', 'retry', 'recoverdir', 'space', 'memory', 'cmtconfig', 'status',
'setokens', 'glexec', 'seopt', 'gatekeeper', 'pcache', 'maxinputsize', 'timefloor',
'corecount', 'faxredirector', 'allowfax', 'maxtime', 'maxwdir',],
'factory' : ['site', 'siteid', 'nickname', 'cloud', 'status', 'jdl', 'queue', 'localqueue', 'nqueue',
'environ', 'proxy', 'glexec', 'depthboost', 'idlepilotsupression', 'pilotlimit', 'transferringlimit',
'memory', 'maxtime', 'system', 'fairsharepolicy','autosetup_pre','autosetup_post'],
# None is magic here and really means "all"
'all' : None,
}
def init(self, dbhost, dbpasswd, dbuser, dbname):
if self.proxyS == None:
self.proxyS = DBProxy()
self.proxyS.connect(dbhost, dbpasswd, dbuser, dbname)
def getStucturedQueueStatus(self):
self.getQueueData()
self.getCloudStatus()
self.maskQueuesByCloud()
def getQueueData(self, site = None, queue = None):
# Dump schedconfig in a single query (it's not very big)
varDict = {}
sql = 'SELECT panda_queue, data from {0}.SCHEDCONFIG_JSON'.format(panda_config.schemaPANDA)
if site:
sql += ' where panda_queue=:site'
varDict[':site'] = site
self.queueData = self.proxyS.queryColumnSQL(sql, varDict)
elif queue:
sql += ' where panda_queue=:queue'
varDict[':queue'] = queue
self.queueData = self.proxyS.queryColumnSQL(sql, varDict)
else:
self.queueData = self.proxyS.queryColumnSQL(sql)
def getCloudStatus(self):
sql = 'SELECT name, status from {0}.CLOUDCONFIG'.format(panda_config.schemaMETA)
r = self.proxyS.querySQL(sql)
self.cloudStatus = dict()
for row in r:
self.cloudStatus[row[0]] = row[1]
def maskQueuesByCloud(self):
'''Force queue status to offline if the cloud is offline'''
for queue in self.queueData:
try:
if self.cloudStatus[queue['cloud']] == 'offline':
queue['status'] = 'offline'
print ('Queue %s forced offline (cloud = %s is offline)' % (queue['nickname'], queue['cloud']))
except KeyError:
print ('No valid cloud status for queue %s (cloud = %s)' % (queue['nickname'], queue['cloud']))
def dumpSingleQueue(self, queueDict, dest = '/tmp', outputSet = 'all', format = 'txt'):
try:
file = os.path.join(dest, queueDict['nickname'] + "." + outputSet + "." + format)
output = open(file, "w")
outputFields = self.queueDataFields[outputSet]
if outputFields == None:
outputFields = queueDict.keys()
if format == 'txt':
for outputField in outputFields:
output.write(outputField + "=" + str(queueDict[outputField]) + "\n")
if format == 'pilot':
outputStr = ''
for outputField in outputFields:
if outputField in queueDict and queueDict[outputField]:
outputStr += outputField + "=" + str(queueDict[outputField]) + "|"
else:
outputStr += outputField + "=|"
output.write(outputStr[:-1])
if format == 'json':
dumpMe = {}
for outputField in outputFields:
if outputField in queueDict:
val = queueDict[outputField]
else:
val = ''
dumpMe[outputField] = val
json.dump(self.queueDictPythonise(dumpMe), output, sort_keys=True, indent=4)
output.close()
# a copy of the file, when makes sense, with filename based on siteid
newfile = os.path.join(dest, queueDict['siteid'] + "." + outputSet + "." + format)
if newfile != file:
shutil.copy(file, newfile)
except Exception:
raise
def dumpQueues(self, queueArray, dest = '/tmp', outputSet = 'all', format = 'txt'):
for queueDict in queueArray:
self.dumpSingleQueue(queueDict, dest, outputSet, format)
def queueDictPythonise(self, queueDict, deepCopy = True):
'''Turn queue dictionary with SQL text fields into a more structured python representation'''
if deepCopy:
structDict = deepcopy(queueDict)
else:
structDict = queueDict
if 'releases' in structDict and structDict['releases'] != None:
if isinstance(structDict['releases'], str):
structDict['releases'] = structDict['releases'].split('|')
# TODO - Change this into Ricardo's ISO dateTime in UTC?
for timeKey in 'lastmod', 'tspace':
if timeKey in structDict:
structDict[timeKey] = structDict[timeKey].isoformat()
return structDict
def dumpAllSchedConfig(self, queueArray = None, dest='/tmp'):
'''Dumps all of schedconfig into a single json file - allows clients to retrieve a
machine readable version of schedconfig efficiently'''
file = os.path.join(dest, "schedconfig.all.json")
if queueArray == None:
queueArray = self.queueData
output = open(file, "w")
dumpMe = {}
for queueDict in queueArray:
dumpMe[queueDict['nickname']] = {}
for k in queueDict:
v = queueDict[k]
dumpMe[queueDict['nickname']][k] = v
dumpMe[queueDict['nickname']] = self.queueDictPythonise(dumpMe[queueDict['nickname']])
json.dump(dumpMe, output, sort_keys=True, indent=4)
self.dump_pilot_gdp_config(dest)
def dump_pilot_gdp_config(self, dest='/tmp'):
app = 'pilot'
dump_me = {}
sql = 'SELECT key, component, vo from {}.config where app=:app'.format(panda_config.schemaPANDA)
r = self.proxyS.querySQL(sql, {':app': app})
for key, component, vo in r:
dump_me.setdefault(vo, {})
value = self.proxyS.getConfigValue(component, key, app, vo)
dump_me[vo][key] = value
# dump
print("pilot GDP config: {}".format(str(dump_me)))
with open(os.path.join(dest, 'pilot_gdp_config.json'), 'w') as f:
json.dump(dump_me, f, sort_keys=True, indent=4)
| apache-2.0 |
mavidser/tardis-web-interface | docs/physics/plasma/plasma_plots/nebular_ionization_balance.py | 13 | 4485 | import os
from matplotlib import colors
from tardis import atomic, plasma_array as plasma, util  # aliased so the plasma.NebularPlasma call below resolves
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
#Making 2 Figures for ionization balance and level populations
plt.figure(1).clf()
ax1 = plt.figure(1).add_subplot(111)
plt.figure(2).clf()
ax2 = plt.figure(2).add_subplot(111)
# expanding the tilde to the users directory
atom_fname = os.path.expanduser('~/.tardis/si_kurucz.h5')
# reading in the HDF5 File
atom_data = atomic.AtomData.from_hdf5(atom_fname)
#The atom_data needs to be prepared to create indices. The Class needs to know which atomic numbers are needed for the
#calculation and what line interaction is needed (for "downbranch" and "macroatom" the code creates special tables)
atom_data.prepare_atom_data([14], 'scatter')
#Initializing the NebularPlasma class using the from_abundance class method.
#This classmethod is normally only needed to test individual plasma classes
#Usually the plasma class just gets the number densities from the model class
nebular_plasma = plasma.NebularPlasma.from_abundance(10000, 0.5, {'Si': 1}, 1e-13, atom_data, 10.)
#Initializing a dataframe to store the ion populations and level populations for the different temperatures
ion_number_densities = pd.DataFrame(index=nebular_plasma.ion_populations.index)
level_populations = pd.DataFrame(index=nebular_plasma.level_populations.ix[14, 1].index)
t_rads = np.linspace(2000, 20000, 100)
#Calculating the different ion populations and level populations for the given temperatures
for t_rad in t_rads:
nebular_plasma.update_radiationfield(t_rad, w=1.0)
#getting total si number density
si_number_density = nebular_plasma.number_density.get_value(14)
#Normalizing the ion populations
ion_density = nebular_plasma.ion_populations / si_number_density
ion_number_densities[t_rad] = ion_density
#normalizing the level_populations for Si II
current_level_population = nebular_plasma.level_populations.ix[14, 1] / nebular_plasma.ion_populations.ix[14, 1]
#normalizing with statistical weight
current_level_population /= atom_data.levels.ix[14, 1].g
level_populations[t_rad] = current_level_population
ion_colors = ['b', 'g', 'r', 'k']
for ion_number in [0, 1, 2, 3]:
current_ion_density = ion_number_densities.ix[14, ion_number]
ax1.plot(current_ion_density.index, current_ion_density.values, '%s-' % ion_colors[ion_number],
label='Si %s W=1.0' % util.int_to_roman(ion_number + 1).upper())
#only plotting every 5th radiation temperature
t_rad_normalizer = colors.Normalize(vmin=2000, vmax=20000)
t_rad_color_map = plt.cm.ScalarMappable(norm=t_rad_normalizer, cmap=plt.cm.jet)
for t_rad in t_rads[::5]:
ax2.plot(level_populations[t_rad].index, level_populations[t_rad].values, color=t_rad_color_map.to_rgba(t_rad))
ax2.semilogy()
#Calculating the different ion populations for the given temperatures with W=0.5
ion_number_densities = pd.DataFrame(index=nebular_plasma.ion_populations.index)
for t_rad in t_rads:
nebular_plasma.update_radiationfield(t_rad, w=0.5)
#getting total si number density
si_number_density = nebular_plasma.number_density.get_value(14)
#Normalizing the ion populations
ion_density = nebular_plasma.ion_populations / si_number_density
ion_number_densities[t_rad] = ion_density
#normalizing the level_populations for Si II
current_level_population = nebular_plasma.level_populations.ix[14, 1] / nebular_plasma.ion_populations.ix[14, 1]
#normalizing with statistical weight
current_level_population /= atom_data.levels.ix[14, 1].g
level_populations[t_rad] = current_level_population
#Plotting the ion fractions
for ion_number in [0, 1, 2, 3]:
print "w=0.5"
current_ion_density = ion_number_densities.ix[14, ion_number]
ax1.plot(current_ion_density.index, current_ion_density.values, '%s--' % ion_colors[ion_number],
label='Si %s W=0.5' % util.int_to_roman(ion_number + 1).upper())
for t_rad in t_rads[::5]:
ax2.plot(level_populations[t_rad].index, level_populations[t_rad].values, color=t_rad_color_map.to_rgba(t_rad),
linestyle='--')
ax2.semilogy()
t_rad_color_map.set_array(t_rads)
cb = plt.figure(2).colorbar(t_rad_color_map)
ax1.set_xlabel('T [K]')
ax1.set_ylabel('Number Density Fraction')
ax1.legend()
ax2.set_xlabel('Level Number for Si II')
ax2.set_ylabel('Number Density Fraction')
cb.set_label('T [K]')
plt.show() | bsd-3-clause |
jldbc/pybaseball | tests/integration/pybaseball/test_statcast.py | 1 | 2718 | from datetime import date, timedelta
import pandas as pd
import pytest
from pybaseball.statcast import _handle_request, _small_request, statcast, statcast_single_game
from pybaseball.utils import sanitize_date_range
from tests.conftest import CURRENT_SC_COLUMNS
def test_small_request() -> None:
start_dt, end_dt = sanitize_date_range('2019-06-01', None)
result = _small_request(start_dt, end_dt)
assert result is not None
assert not result.empty
assert len(result.columns) == CURRENT_SC_COLUMNS
assert len(result) == 4556
def test_statcast() -> None:
result = statcast('2019-05-01', '2019-05-04')
assert result is not None
assert not result.empty
assert len(result.columns) == CURRENT_SC_COLUMNS
assert len(result) == 16130
def test_statcast_chunking() -> None:
result = statcast('2019-05-01', '2019-05-15').reset_index(drop=True)
assert result is not None
assert not result.empty
day_results = []
start_date = date(2019, 5, 1)
for day in range(15):
day_results.append(statcast(str(start_date + timedelta(days=day))))
day_results_dataframe = pd.concat(day_results, axis=0).convert_dtypes(convert_string=False)
day_results_dataframe = day_results_dataframe.sort_values(
['game_date', 'game_pk', 'at_bat_number', 'pitch_number'],
ascending=False
).reset_index(drop=True)
assert list(result.columns) == list(day_results_dataframe.columns)
assert len(result) == len(day_results_dataframe)
def test_handle_request_pre_season() -> None:
start_dt, end_dt = sanitize_date_range('2019-03-01', '2019-03-22')
result = _handle_request(start_dt, end_dt, step=1, verbose=False)
assert result is not None
assert not result.empty
assert len(result.columns) == CURRENT_SC_COLUMNS
assert len(result) == 689
def test_handle_request_post_season() -> None:
start_dt, end_dt = sanitize_date_range('2018-11-14', '2019-03-22')
with pytest.warns(UserWarning):
result = _handle_request(start_dt, end_dt, step=1, verbose=False)
assert result is not None
assert not result.empty
assert len(result.columns) == CURRENT_SC_COLUMNS
assert len(result) == 689
def test_handle_request_post_season_same_year() -> None:
start_dt, end_dt = sanitize_date_range('2018-11-14', '2018-11-30')
result = _handle_request(start_dt, end_dt, step=1, verbose=False)
assert result is not None
assert result.empty
def test_single_game_request() -> None:
result = statcast_single_game(289317)
assert result is not None
assert not result.empty
assert len(result.columns) == CURRENT_SC_COLUMNS
assert len(result) == 462
| mit |
jlegendary/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
MTgeophysics/mtpy | examples/scripts/ModEM_PlotRMS.py | 1 | 2589 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 04 13:13:29 2016
@author: Alison Kirkby
Plots data/model misfit for a given period, at all sites, separately for each
of the impedance tensor modes + tipper
Revision History:
[email protected] 31-03-2020 13:42:41 AEDT:
- Add option for plotting impedance or tippers
- Add shapefile creation
- Allow specifying period in seconds
- Add plotting on geotiff background
"""
import os
os.chdir(r'C:\mtpywin\mtpy')
import os.path as op
import numpy as np
from mtpy.modeling.modem import PlotRMSMaps
wd = r'C:\mtpywin\mtpy\examples\model_files\ModEM_2'
savepath = r'C:\tmp'
filestem = op.join(wd,'Modular_MPI_NLCG_004')
resid_fn=op.join(wd,filestem + '.res')
# Parameter explanations (TODO: add to user guide):
# plot_elements: can plot only impedance or tippers by setting to
# 'impedance' or 'tippers'. Set as 'both' or leave out to plot impedance
# and tippers.
# bimg: path to a geotiff to use as map background image. If the CRS of
# the geotiff and the model differ, then 'PlotRMSMaps.model_epsg' must
# also be provided so model coordinates can be correctly projected onto
# the geotiff.
# period: can choose period by providing 'period_index' or by providing
# period in seconds to 'period'. If 'period' is provided it will take
# priority over 'period_index'. The closest available period will be
# selected, so the plotted period may be different from what was chosen.
probj = PlotRMSMaps(resid_fn,
# period=100., # can specify a period in seconds
period_index='all',
rms_cmap='jet', # choose matplotlib colormap or set to None
rms_max=5,
plot_elements='both',
# bimg=r'C:\path\to\a\background_image.tif'
)
# Can write RMS map as shapefiles by calling 'create_shapefiles'. This
# will use the period, plot_elements etc. attributes provided to the
# PlotRMSMaps class above.
# dst_epsg: the CRS of the shapefile output. This should be set to the
# EPSG code of the geotiff the shapefiles are intended to be displayed
# on.
# save_path: an optional save_path. If not provided, will save to the
# PlotRMSMaps.save_path atribute. Shapefiles will be saved within
# subdirectories labelled with component and period within this
# directory.
probj.create_shapefiles(dst_epsg=4326, save_path=savepath)
probj.save_figure(save_path=savepath,
save_fig_dpi = 400 # change to your preferred figure resolution
)
| gpl-3.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py | 70 | 8970 | from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
print >>sys.stderr, 'The CocoaAgg backend requires PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
8, # bits per pixel
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
| agpl-3.0 |
glennq/scikit-learn | sklearn/utils/multiclass.py | 41 | 14732 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
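# Illustrative sketch (not part of scikit-learn): how an estimator's
# partial_fit typically uses _check_partial_fit_first_call.  The helper and
# its body are hypothetical and only show the intended control flow.
def _partial_fit_usage_sketch(estimator, X, y, classes=None):
    if _check_partial_fit_first_call(estimator, classes):
        # First call: estimator.classes_ has just been set from `classes`,
        # so per-class state could be allocated here.
        pass
    # ...an incremental update of `estimator` from X and y would follow here.
    return estimator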
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
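# Illustrative sketch (not part of scikit-learn): the shapes expected by
# _ovr_decision_function for n_classes=3, i.e. three pairwise classifiers
# ordered (0 vs 1), (0 vs 2), (1 vs 2).  The values are made up.
def _ovr_decision_function_example():
    predictions = np.array([[1, 1, 1]])        # each pair voted for its second class
    confidences = np.array([[0.2, 0.4, 0.1]])  # matching decision-function values
    return _ovr_decision_function(predictions, confidences, n_classes=3)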
| bsd-3-clause |
y-j-n/pyFastfusion | modeler/cols_plot.py | 1 | 7186 | #!/usr/bin/env python
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import time
import sys
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
def get_fig_2d(list_hor, list_list_ver, title='title'):
fig = plt.figure()
ax = fig.add_subplot(111)
list_colors = ['r', 'g', 'b', 'c', 'm', 'y']
for i, list_ver in enumerate(list_list_ver):
color = list_colors[i % 6]
ax.plot(list_hor, list_ver, c=color)
ax.set_xlabel('list_hor')
ax.set_ylabel('list_ver')
ax.set_title(title)
return fig
def get_fig_scatter3d(list_x, list_y, list_z, title='title', scale=[-1, 1]):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = np.array(list_x)
ys = np.array(list_y)
zs = np.array(list_z)
ax.scatter(xs, ys, zs, c='r', marker='o')
ax.auto_scale_xyz(scale, scale, scale)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title(title)
return fig
def cut_cols(list_target_cols, filename, delimiters=' '):
dict_list_cols = {}
for col_index in list_target_cols:
dict_list_cols[col_index] = []
#print dict_list_cols
with open(filename) as f:
for line in f:
if line.startswith('#'):
continue
#time, tx, ty, tz, qx, qy, qz, qw = l.split()
list_nums = line.split(delimiters)
for target in dict_list_cols.keys():
#print target
list_col = dict_list_cols[target]
num = float(list_nums[target])
list_col.append(num)
return dict_list_cols
def plot_poscam():
#filename = '../data-fastfusion-tum/rgbd_dataset_freiburg3_long_office_household/groundtruth.txt'
#filename = '../data-fastfusion-tum/rgbd_dataset_freiburg3_long_office_household/assoc_opt_traj_final.txt'
# dict_list_cols = cut_cols([0, 1, 2], ',log-posCam')
# list_x = dict_list_cols[0]
# list_y = dict_list_cols[1]
# list_z = dict_list_cols[2]
# dict_list_cols = cut_cols([2, 3, 4], 'log-poscam--m200')
# list_x = dict_list_cols[2]#[9:19]
# list_y = dict_list_cols[3]#[9:19]
# list_z = dict_list_cols[4]#[9:19]
#dict_list_cols = cut_cols([2, 3, 4], 'log-poscam--ambassador')
# dict_list_cols = cut_cols([2, 3, 4], 'log-poscam--orig-mod')
dict_list_cols = cut_cols([2, 3, 4], 'log-poscam')
list_x = dict_list_cols[2]
list_y = dict_list_cols[3]
list_z = dict_list_cols[4]
#print dict_list_cols
#exit()
fig_cam_xyz = get_fig_2d(range(len(list_x)), [list_x, list_y, list_z], 'camXYZ')
fig_cam_traj = get_fig_scatter3d(list_x, list_y, list_z, 'camTraj')
#fig.show()
#time.sleep(120)
plt.show() # show all figs created
def plot_kp():
if 0:
#filename = 'log-k_P'
#filename = 'log-k_P-freiburg3'
filename = 'log-kp-grass'
#filename = 'log-kp-sq'
col = cut_cols([3, 4, 5], filename) # col is a dictionary
fig = get_fig_scatter3d(col[3], col[4], col[5], 'k_P')
#fig.show()
#time.sleep(120)
plt.show() # show all figs created
if 1:
#filename = '2.610428_raw.csv'
#filename = '2.610428_model.csv'
filename = '2.744492_model.csv'
col = cut_cols([2, 3, 4], filename, ';')
fig = get_fig_scatter3d(col[2], col[3], col[4], filename)
plt.show() # show all figs created
def test_GLScatterPlotItem():
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLScatterPlotItem mods')
g = gl.GLGridItem()
w.addItem(g)
#-----------------
if 0:
sp_origin = gl.GLScatterPlotItem(
pos=np.array([(0, 0, 0)]), size=0.5,
color=(1.0, 0.0, 0.0, 0.5), pxMode=False)
#sp_origin.translate(5, 5, 0)
w.addItem(sp_origin)
if 0:
# http://www.pyqtgraph.org/documentation/3dgraphics/glscatterplotitem.html
# orig: ../opengl/pyqtgraph-develop/examples/GLScatterPlotItem.py
pos = np.zeros((3, 3))
size = np.zeros((3))
color = np.zeros((3, 4))
pos[0] = (0,0,0)
pos[1] = (1,1,1)
pos[2] = (2,2,2)
size[0] = 0.5
size[1] = 0.2
size[2] = 2./3.
color[0] = (1.0, 0.0, 0.0, 0.5)
color[1] = (0.0, 0.0, 1.0, 0.5)
color[2] = (0.0, 1.0, 0.0, 0.5)
print pos
sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)
if 0:
filename = '2.403822_model_large.csv'
bbox = (15, 55, 264, 370)
# filename = '6.961998_model_large.csv'
# bbox = (306, 103, 594, 435)
print 'filename: '+filename
col = cut_cols([0, 1, 2, 3, 4, 5, 6, 7], filename, ';')
npa_ij = np.array([col[0], col[1]]).transpose()
print 'finding indices in bbox...'
li_indices_bb = []
i_min = bbox[0]
i_max = bbox[2]
j_min = bbox[1]
j_max = bbox[3]
for index, ij in enumerate(npa_ij):
if i_min <= ij[0] <= i_max:
if j_min <= ij[1] <= j_max:
li_indices_bb.append(index)
print '# of pixels in bbox: %d' % len(li_indices_bb)
xyz = np.array([col[2], col[3], col[4]]).transpose()
xyz_bb = xyz[li_indices_bb, :]
# print xyz
# print xyz_bb
if 1:
csv_out = 'renameme_bbox.csv'
            print('dumping %s ...' % csv_out)
npa_csv = np.array([col[0], col[1],
col[2], col[3], col[4],
col[5], col[6], col[7]]).transpose()
with open(csv_out, 'w') as f:
for index in li_indices_bb:
vals = npa_csv[index, :]
f.write('%d;%d;%f;%f;%f;%d;%d;%d\n' % (vals[0], vals[1],
vals[2], vals[3], vals[4],
vals[5], vals[6], vals[7]))
            print('done.')
rgb = [(1.0, 0.0, 0.0, 0.5),
(0.0, 1.0, 0.0, 0.5),
(0.0, 0.0, 1.0, 0.5)]
w.addItem(gl.GLScatterPlotItem(pos=xyz, size=2.0, color=rgb[0], pxMode=True))
w.addItem(gl.GLScatterPlotItem(pos=xyz_bb, size=2.0, color=rgb[1], pxMode=True))
if 1:
filename = 'ellispsoid.csv'
        print('filename: ' + filename)
col = cut_cols([0, 1, 2], filename, ';')
xyz = np.array([col[0], col[1], col[2]]).transpose()
        print('# of points:')
        print(xyz.shape)
rgb = [(1.0, 0.0, 0.0, 0.5),
(0.0, 1.0, 0.0, 0.5),
(0.0, 0.0, 1.0, 0.5)]
w.addItem(gl.GLScatterPlotItem(pos=xyz, size=2.0, color=rgb[0], pxMode=True))
#-----------------
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
#plot_poscam()
#plot_kp()
test_GLScatterPlotItem()
| gpl-2.0 |
dimaleks/uDeviceX | tests/md/multistage.py | 1 | 3377 | #!/usr/bin/env python
from mpi4py import MPI
import mirheo as mir
import numpy as np
import sys
from utils import get_h5_forces
def main():
# Units. 1 == Mirheo unit.
nm = 1
fs = 1
kg = 1e27
K = 1
m = 1e9 * nm
s = 1e15 * fs
J = kg * m ** 2 / s ** 2
# Argon and system properties.
epsilon = 996. * J / 6.022e23
sigma = 0.340 * nm
mass = 39.948 * 1.66053906660e-27 * kg
kB = 1.380649e-23 * J / K
max_displacement = 0.005 * nm
number_density = 0.1 / sigma ** 3 # Use very small density for testing.
domain = (12 * nm, 10 * nm, 8 * nm)
u = mir.Mirheo((1, 1, 1), domain, debug_level=3, log_filename='log', no_splash=True)
pv = mir.ParticleVectors.ParticleVector('pv', mass=mass)
ic = mir.InitialConditions.Uniform(number_density=number_density)
u.registerParticleVector(pv, ic)
lj = mir.Interactions.Pairwise('lj', rc=1 * nm, kind='LJ', epsilon=epsilon, sigma=sigma)
u.registerInteraction(lj)
u.setInteraction(lj, pv, pv)
##############################
# Stage 1.
##############################
int_min = mir.Integrators.Minimize('minimize', max_displacement=max_displacement)
u.registerIntegrator(int_min)
u.setIntegrator(int_min, pv)
# Measure forces, they should decrease over time.
plugin_force_saver = mir.Plugins.createForceSaver('forceSaver', pv)
plugin_dump = mir.Plugins.createDumpParticles('meshdump', pv, 200, ['forces'], 'h5/pv-')
u.registerPlugins(plugin_force_saver)
u.registerPlugins(plugin_dump)
u.run(1001, dt=0.01 * fs)
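    # Tear down the stage-1 machinery: remove the dump/force plugins and the
    # minimization integrator so that stage 2 runs with velocity Verlet only.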
u.deregisterPlugins(plugin_dump)
u.deregisterPlugins(plugin_force_saver)
u.deregisterIntegrator(int_min)
del plugin_dump
del plugin_force_saver
##############################
# Stage 2.
##############################
int_vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(int_vv)
u.setIntegrator(int_vv, pv)
# Measure temperature, it should approach the reference temperature.
u.registerPlugins(mir.Plugins.createBerendsenThermostat('thermostat', [pv], kBT=215 * K * kB, tau=50.0 * fs))
u.registerPlugins(mir.Plugins.createStats('stats', every=50, filename='stats.csv'))
u.run(500, dt=1 * fs)
##############################
# Stage 3.
##############################
u.run(200, dt=5 * fs)
if MPI.COMM_WORLD.rank == 0:
print("log(force) during minimization", file=sys.stderr)
for i in range(5):
forces = get_h5_forces('h5/pv-{:05}.h5'.format(i))
forces = np.sqrt(forces[:, 0]**2 + forces[:, 1]**2 + forces[:, 2]**2)
forces = np.sum(forces)
# Using log because nTEST has a tolerance of 0.1.
print(i, np.log(forces + 1e-9) * 100, file=sys.stderr)
# This could be done with pandas, but better to avoid importing it.
print("temperature during equilibration and the run", file=sys.stderr)
with open('stats.csv', 'r') as f:
header = f.readline().split(',')
assert header[0:2] == ['time', 'kBT']
mat = np.loadtxt(f, delimiter=',', usecols=(0, 1))
mat[:, 1] /= kB
for row in mat:
print(*row, file=sys.stderr)
main()
# nTEST: md.multistage
# cd md
# mir.run --runargs "-n 2" ./multistage.py > /dev/null 2> multistage.out.txt
| gpl-3.0 |
HeraclesHX/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
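    # Worked out for the third column [0.5, -0.1, 1.1]: median = 0.5 and, with
    # numpy's linear interpolation, q25 = 0.2 and q75 = 0.8, so IQR = 0.6 and
    # (x - 0.5) / 0.6 yields [0, -1, +1] as below.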
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
jmchen-g/models | transformer/cluttered_mnist.py | 1 | 6322 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from spatial_transformer import transformer
from scipy import ndimage
import numpy as np
import matplotlib.pyplot as plt
from tf_utils import conv2d, linear, weight_variable, bias_variable, dense_to_one_hot
# %% Load data
mnist_cluttered = np.load('./data/mnist_sequence1_sample_5distortions5x5.npz')
X_train = mnist_cluttered['X_train']
y_train = mnist_cluttered['y_train']
X_valid = mnist_cluttered['X_valid']
y_valid = mnist_cluttered['y_valid']
X_test = mnist_cluttered['X_test']
y_test = mnist_cluttered['y_test']
# % turn from dense to one hot representation
Y_train = dense_to_one_hot(y_train, n_classes=10)
Y_valid = dense_to_one_hot(y_valid, n_classes=10)
Y_test = dense_to_one_hot(y_test, n_classes=10)
# %% Graph representation of our network
# %% Placeholders for 40x40 resolution
x = tf.placeholder(tf.float32, [None, 1600])
y = tf.placeholder(tf.float32, [None, 10])
# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 40, 40, 1])
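# e.g. a batch of 256 flattened cluttered-MNIST images of shape (256, 1600)
# becomes a (256, 40, 40, 1) tensor (40 * 40 = 1600, single channel).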
# %% We'll set up the two-layer localisation network to figure out the parameters for an affine transformation of the input
# %% Create variables for fully connected layer
W_fc_loc1 = weight_variable([1600, 20])
b_fc_loc1 = bias_variable([20])
W_fc_loc2 = weight_variable([20, 6])
initial = np.array([[1.,0, 0],[0,1.,0]]) # Use identity transformation as starting point
initial = initial.astype('float32')
initial = initial.flatten()
b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
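# The 6 localisation outputs parameterise a 2x3 affine matrix
# [[s_x, r_x, t_x], [r_y, s_y, t_y]] (scale/shear/rotation plus translation)
# that is fed to the spatial transformer below; the bias starts from the
# flattened identity matrix as noted above.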
# %% Define the two layer localisation network
h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob)
# %% Second layer
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2)
# %% We'll create a spatial transformer module to identify discriminative patches
h_trans = transformer(x_tensor, h_fc_loc2, downsample_factor=1)
# %% We'll set up the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 3
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])
# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])
# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
tf.nn.conv2d(input=h_trans,
filter=W_conv1,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
tf.nn.conv2d(input=h_conv1,
filter=W_conv2,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv2)
# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 10 * 10 * n_filters_2])
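# Two stride-2 convolutions with 'SAME' padding halve the 40x40 input twice
# (40 -> 20 -> 10), hence the 10 * 10 * n_filters_2 flattened size.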
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([10 * 10 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
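# Note: tf.log(y_pred) is -inf wherever a predicted probability is exactly 0;
# a more numerically robust alternative would be computing the loss from the
# pre-softmax logits (e.g. tf.nn.softmax_cross_entropy_with_logits).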
opt = tf.train.AdamOptimizer()
optimizer = opt.minimize(cross_entropy)
grads = opt.compute_gradients(cross_entropy, [b_fc_loc2])
# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization of the
# variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll now train in minibatches and report accuracy, loss:
iter_per_epoch = 100
n_epochs = 500
train_size = 10000
indices = np.linspace(0,10000 - 1,iter_per_epoch)
indices = indices.astype('int')
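# The 100 evenly spaced indices over the 10000 training samples are used as
# consecutive slice boundaries, giving mini-batches of roughly 100 samples.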
for epoch_i in range(n_epochs):
for iter_i in range(iter_per_epoch - 1):
batch_xs = X_train[indices[iter_i]:indices[iter_i+1]]
batch_ys = Y_train[indices[iter_i]:indices[iter_i+1]]
if iter_i % 10 == 0:
loss = sess.run(cross_entropy,
feed_dict={
x: batch_xs,
y: batch_ys,
keep_prob: 1.0
})
print('Iteration: ' + str(iter_i) + ' Loss: ' + str(loss))
sess.run(optimizer, feed_dict={
x: batch_xs, y: batch_ys, keep_prob: 0.8})
print('Accuracy: ' + str(sess.run(accuracy,
feed_dict={
x: X_valid,
y: Y_valid,
keep_prob: 1.0
})))
#theta = sess.run(h_fc_loc2, feed_dict={
# x: batch_xs, keep_prob: 1.0})
#print(theta[0])
| apache-2.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/test_dtypes.py | 1 | 6523 | # -*- coding: utf-8 -*-
from itertools import product
import nose
import numpy as np
from pandas import Series, Categorical, date_range
from pandas.core.common import (CategoricalDtype, is_categorical_dtype,
is_categorical, DatetimeTZDtype,
is_datetime64tz_dtype, is_datetimetz,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype)
import pandas.util.testing as tm
_multiprocess_can_split_ = True
class Base(object):
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
self.assertRaises(self.dtype == 'foo')
def test_numpy_informed(self):
# np.dtype doesn't know about our new dtype
def f():
np.dtype(self.dtype)
self.assertRaises(TypeError, f)
self.assertNotEqual(self.dtype, np.str_)
self.assertNotEqual(np.str_, self.dtype)
def test_pickle(self):
result = self.round_trip_pickle(self.dtype)
self.assertEqual(result, self.dtype)
class TestCategoricalDtype(Base, tm.TestCase):
def setUp(self):
self.dtype = CategoricalDtype()
def test_equality(self):
self.assertTrue(is_dtype_equal(self.dtype, 'category'))
self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
self.assertTrue(is_dtype_equal(self.dtype, result))
self.assertRaises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_is_dtype(self):
self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
self.assertTrue(CategoricalDtype.is_dtype('category'))
self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
self.assertFalse(CategoricalDtype.is_dtype('foo'))
self.assertFalse(CategoricalDtype.is_dtype(np.float64))
def test_basic(self):
self.assertTrue(is_categorical_dtype(self.dtype))
factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'
])
s = Series(factor, name='A')
# dtypes
self.assertTrue(is_categorical_dtype(s.dtype))
self.assertTrue(is_categorical_dtype(s))
self.assertFalse(is_categorical_dtype(np.dtype('float64')))
self.assertTrue(is_categorical(s.dtype))
self.assertTrue(is_categorical(s))
self.assertFalse(is_categorical(np.dtype('float64')))
self.assertFalse(is_categorical(1.0))
class TestDatetimeTZDtype(Base, tm.TestCase):
def setUp(self):
self.dtype = DatetimeTZDtype('ns', 'US/Eastern')
def test_construction(self):
self.assertRaises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype('datetime64[ns, CET]')
self.assertTrue(issubclass(type(a), type(a)))
self.assertTrue(issubclass(type(a), type(b)))
def test_compat(self):
self.assertFalse(is_datetime64_ns_dtype(self.dtype))
self.assertFalse(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
self.assertFalse(is_datetime64_dtype(self.dtype))
self.assertFalse(is_datetime64_dtype('datetime64[ns, US/Eastern]'))
def test_construction_from_string(self):
result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
self.assertTrue(is_dtype_equal(self.dtype, result))
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
self.assertTrue(is_dtype_equal(self.dtype, result))
self.assertRaises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_is_dtype(self):
self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype(
'ns', 'US/Pacific')))
self.assertFalse(DatetimeTZDtype.is_dtype(np.float64))
def test_equality(self):
self.assertTrue(is_dtype_equal(self.dtype,
'datetime64[ns, US/Eastern]'))
self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype(
'ns', 'US/Eastern')))
self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
self.assertFalse(is_dtype_equal(self.dtype, DatetimeTZDtype('ns',
'CET')))
self.assertFalse(is_dtype_equal(
DatetimeTZDtype('ns', 'US/Eastern'), DatetimeTZDtype(
'ns', 'US/Pacific')))
# numpy compat
self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]"))
def test_basic(self):
self.assertTrue(is_datetime64tz_dtype(self.dtype))
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
self.assertTrue(is_datetime64tz_dtype(s.dtype))
self.assertTrue(is_datetime64tz_dtype(s))
self.assertFalse(is_datetime64tz_dtype(np.dtype('float64')))
self.assertFalse(is_datetime64tz_dtype(1.0))
self.assertTrue(is_datetimetz(s))
self.assertTrue(is_datetimetz(s.dtype))
self.assertFalse(is_datetimetz(np.dtype('float64')))
self.assertFalse(is_datetimetz(1.0))
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
self.assertTrue(is_datetimetz(s1))
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
self.assertTrue(is_datetimetz(s2))
self.assertEqual(s1.dtype, s2.dtype)
def test_parser(self):
# pr #11245
for tz, constructor in product(('UTC', 'US/Eastern'),
('M8', 'datetime64')):
self.assertEqual(
DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)),
DatetimeTZDtype('ns', tz),
)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
3manuek/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 44 | 17033 | import tempfile
import shutil
import os.path as op
import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
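            # ocur = size of the active set: covariances within eps of the max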
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# Ensure that at least some features get added if necessary
# (non-regression test for commit 6d2b4c)
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Ensure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general; it is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
temp_folder = tempfile.mkdtemp()
try:
fpath = op.join(temp_folder, 'data.pkl')
joblib.dump(splitted_data, fpath)
X_train, X_test, y_train, y_test = joblib.load(fpath, mmap_mode='r')
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
finally:
# try to release the mmap file handle in time to be able to delete
# the temporary folder under windows
del X_train, X_test, y_train, y_test
try:
shutil.rmtree(temp_folder)
except shutil.WindowsError:
warnings.warn("Could not delete temporary folder %s" % temp_folder)
| bsd-3-clause |
goldentom42/predictor_stacker | linear_stacker/examples/regression.py | 1 | 4250 | from sklearn import __version__ as sklearn_version
from sklearn.datasets import load_boston
from sklearn.linear_model import Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
from linear_stacker import RegressionLinearPredictorStacker
try:
from sklearn.model_selection import KFold
except ImportError:
from sklearn.cross_validation import KFold
def get_folds(data):
"""returns correct folding generator for different versions of sklearn"""
if sklearn_version.split('.')[1] == '18':
# Module model_selection is in the distribution
kf = KFold(n_splits=5, shuffle=True, random_state=1)
return kf.split(data)
else:
# Module model_selection is not in the distribution
kf = KFold(n=len(data), n_folds=5, shuffle=True, random_state=1)
return kf
def main():
pd.options.display.max_rows = 600
# Load boston dataset
dataset = load_boston()
regressors = [
('ridge', Ridge(alpha=0.001, normalize=True, random_state=0)),
('lasso', Lasso(alpha=0.01, normalize=True, random_state=1)),
('xtr', ExtraTreesRegressor(n_estimators=50, max_features=.4, max_depth=10, random_state=2, n_jobs=-1)),
('rfr', RandomForestRegressor(n_estimators=50, max_features=.2, max_depth=10, random_state=3, n_jobs=-1)),
('gbr', GradientBoostingRegressor(n_estimators=100, max_depth=2, learning_rate=.1, random_state=4))
]
# Go through classifiers
oof_preds = np.zeros((len(dataset.data), len(regressors)))
for reg_i, (name, reg) in enumerate(regressors):
# compute out of fold (OOF) predictions
for trn_idx, val_idx in get_folds(dataset.data):
# Split data in training and validation sets
trn_X, trn_Y = dataset.data[trn_idx], dataset.target[trn_idx]
val_X = dataset.data[val_idx]
# Fit the regressor
reg.fit(trn_X, trn_Y)
# Predict OOF data
oof_preds[val_idx, reg_i] = reg.predict(val_X)
# Display OOF score
print("MSE for regressor %6s : %.5f" % (name, mean_squared_error(dataset.target, oof_preds[:, reg_i])))
# First test using standard algorithm
stacker = RegressionLinearPredictorStacker(metric=mean_squared_error,
algo='standard',
max_iter=100,
verbose=0,
normed_weights=True)
stacker.fit(pd.DataFrame(oof_preds, columns=[name for (name, _) in regressors]),
pd.Series(dataset.target, name='target'))
print("Standard stacker score with normed weights : %.5f"
% (mean_squared_error(dataset.target, stacker.predict(oof_preds))))
# Second test using non normed weights
stacker = RegressionLinearPredictorStacker(metric=mean_squared_error,
algo='standard',
max_iter=100,
verbose=0,
normed_weights=False)
stacker.fit(pd.DataFrame(oof_preds, columns=[name for (name, _) in regressors]),
pd.Series(dataset.target, name='target'))
print("Standard stacker score without normed weights : %.5f"
% (mean_squared_error(dataset.target, stacker.predict(oof_preds))))
stacker = RegressionLinearPredictorStacker(metric=mean_squared_error,
algo='swapping',
max_iter=100,
verbose=0,
normed_weights=True)
stacker.fit(pd.DataFrame(oof_preds, columns=[name for (name, _) in regressors]),
pd.Series(dataset.target, name='target'))
print("Swapping stacker score without normed weights : %.5f"
% (mean_squared_error(dataset.target, stacker.predict(oof_preds))))
if __name__ == '__main__':
main()
| apache-2.0 |
Nyker510/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
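# (This pairwise count is the Mann-Whitney U statistic, ignoring ties;
# dividing by len(pos) * len(neg) below turns it into the AUC.)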
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve that starts at 0 and ends
# at 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
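# With every score tied, each label's tie-corrected rank is n_labels,
# so each relevant label contributes n_relevant / n_labels, and so does
# the average asserted below.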
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works without ties:
# basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied at rank 1 both end up with rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better
# (i.e. smaller or equal) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
alassman/robotnav | Archer_data.py | 1 | 2169 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 10:26:32 2016
@author: Michael
"""
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
import socket
from time import sleep
import re
message_a1 = 'WP 1 2000.00 2000.00'
message_a1 = 'WP 5 1688.25 667.54 2720.16 1280.30 2464.90 2452.84 1294.06 2715.76 675.93 1687.04'
TCP_IP = '35.2.37.146'
#TCP_PORT1 = 9998
TCP_PORT = 9999
BUFFER_SIZE = 512
MESSAGE = "Hello, World!"
socket.setdefaulttimeout(30)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.bind((TCP_IP, TCP_PORT))
#s.listen(3)
s.connect((TCP_IP, TCP_PORT))
#conn, addr = s.accept()
currentData = np.array([])
xData = np.array([])
yData = np.array([])
hData = np.array([])
sData = np.array([])
rData = np.array([])
#s.send(message_a1)
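# Main receive loop: the remote end requests waypoints with a 'WP' message
# (answered with message_a1) and streams 'Current ...' telemetry lines whose
# numeric fields are parsed with a regex and appended to the arrays above.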
try:
while True:
#print 'Connection address:', addr
data = s.recv(BUFFER_SIZE)
if (data.find('WP')>=0):
s.send(message_a1)
print "sent wp:", message_a1
#sleep(0.5)
elif (data.find('Current')==0):
print "received data:", data
Data = re.findall( r'\d+\.*\d*', data)
if Data[0]:
currentData = np.append(currentData, float(Data[0]))
xData = np.append(xData, float(Data[1]))
yData = np.append(yData, float(Data[2]))
hData = np.append(hData, float(Data[3]))
sData = np.append(sData, float(Data[4]))
rData = np.append(rData, float(Data[5]))
#conn.send(MESSAGE) # echo
# = np.append(speedData, Data[3])
except KeyboardInterrupt:
pass
s.close()
DATA = np.array([currentData, xData, yData, hData, sData, rData])
"""
N = currentData.size
samples = np.arange(1, N+1)
plt.plot(samples, currentData)
plt.xlabel('Samples')
plt.ylabel('Current from Battery')
N = currentData.size
samples = np.arange(1, N+1)
plt.plot(xData, yData, 'ro')
plt.xlabel('X Position')
plt.ylabel('Y Position')
plt.axis([0, 3000, 0, 3000])
plt.show()
np.savetxt('testwp1.txt', DATA)
""" | gpl-3.0 |
fzalkow/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
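# log_normalize is expected to map unnormalized log values back to normalized
# probabilities; here a = log(2 * v) and v already sums to one, so we should
# recover v.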
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
ClimbsRocks/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | examples/covariance/plot_covariance_estimation.py | 4 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
# #############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
# #############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
# #############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
# #############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
nchammas/spark | python/pyspark/sql/pandas/utils.py | 6 | 2633 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "1.0.0"
from distutils.version import LooseVersion
import os
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
if os.environ.get("ARROW_PRE_0_15_IPC_FORMAT", "0") == "1":
raise RuntimeError("Arrow legacy IPC format is not supported in PySpark, "
"please unset ARROW_PRE_0_15_IPC_FORMAT")
| apache-2.0 |
fmpr/texttk | setup.py | 1 | 1901 | # -*- coding: utf-8 -*-
# Copyright (C) 2016, Filipe Rodrigues <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
try:
import setuptools
except:
print '''
setuptools not found.
On linux, the package is often called python-setuptools'''
from sys import exit
exit(1)
long_description = file('README.rst').read()
packages = filter(lambda p: p.startswith('texttk'), setuptools.find_packages())
print packages
setuptools.setup(name = 'texttk',
version = '0.1',
description = 'Text Preprocessing ToolKit',
long_description = long_description,
author = u'Filipe Rodrigues',
author_email = '[email protected]',
url = 'http://amilab.dei.uc.pt/fmpr/',
license = 'Academic Free License',
install_requires = ['nltk','sklearn','HTMLParser'],
packages = packages,
)
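# A typical (assumed) build-and-install flow for this package:
#   python setup.py build
#   python setup.py install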
| gpl-3.0 |
leiferikb/bitpop | src/native_client/pnacl/driver/pnacl-driver.py | 2 | 28871 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DriverChain, GetArch, ParseArgs, ParseTriple, Run, RunDriver, RunWithEnv, \
TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
EXTRA_ENV = {
'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
# It doesn't normally make sense to do this.
'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
# linker line for .pexe generation.
# It doesn't normally make sense to do this.
# CXX_EH_MODE specifies how to deal with C++ exception handling:
# * 'none': Strips out use of C++ exception handling.
# * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
# C++ exception handling. This is supported in PNaCl's stable
# ABI.
# * 'zerocost': Enables the zero-cost implementation of C++
# exception handling. This is not supported in PNaCl's stable
# ABI.
'CXX_EH_MODE': 'none',
'FORCE_INTERMEDIATE_LL': '0',
# Produce an intermediate .ll file
# Useful for debugging.
# NOTE: potentially different code paths and bugs
# might be triggered by this
'LANGUAGE' : '', # C or CXX (set by SetTool)
'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
# Command-line options
'GCC_MODE' : '', # '' (default), '-E', '-c', or '-S'
'STDINC' : '1', # Include standard headers (-nostdinc sets to 0)
'STDINCCXX' : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
'USE_STDLIB' : '1', # Include standard libraries (-nostdlib sets to 0)
'STDLIB' : '', # C++ Standard Library.
'STDLIB_TRUNC': '', # C++ Standard Library, truncated to pass as -lXXX.
'STDLIB_IDIR' : '', # C++ Standard Library include directory.
# Note: the above C++ Standard Library
# settings use a default if their value
                          # remains unset.
'DEFAULTLIBS' : '1', # Link with default libraries
'DIAGNOSTIC' : '0', # Diagnostic flag detected
'PIC' : '0', # Generate PIC
# TODO(robertm): Switch the default to 1
'NO_ASM' : '0', # Disallow use of inline assembler
'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
# type set (using -x) unless -E is specified.
'VERBOSE' : '0', # Verbose (-v)
'SHOW_VERSION': '0', # Version (--version)
'PTHREAD' : '0', # use pthreads?
'INPUTS' : '', # Input files
'OUTPUT' : '', # Output file
'UNMATCHED' : '', # Unrecognized parameters
'BIAS_NONE' : '',
'BIAS_ARM' : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
'BIAS_MIPS32' : '-D__MIPS__ -D__mips__ -D__MIPSEL__',
'BIAS_X8632' : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
'BIAS_X8664' : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
'OPT_LEVEL' : '', # Default for most tools is 0, but we need to know
# if it's explicitly set or not when the driver
# is only used for linking + translating.
'CC_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
'-fno-vectorize -fno-slp-vectorize ' +
'-fno-common ${PTHREAD ? -pthread} ' +
'-nostdinc ${BIAS_%BIAS%} ' +
# BUG: http://code.google.com/p/nativeclient/issues/detail?id=2345
# it would be better to detect asm use inside clang
# as some uses of asm are borderline legit, e.g.
# <prototype> asm("<function-name>");
'${NO_ASM ? -Dasm=ASM_FORBIDDEN -D__asm__=ASM_FORBIDDEN} ' +
'-target ${FRONTEND_TRIPLE}',
'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
'ISYSTEM_USER' : '', # System include directories specified by
# using the -isystem flag.
'ISYSTEM_BUILTIN':
'${BASE_USR}/local/include ' +
'${ISYSTEM_CLANG} ' +
'${ISYSTEM_CXX} ' +
'${BASE_USR}/include ' +
'${BASE_SDK}/include ',
'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/3.4/include',
'ISYSTEM_CXX' :
'${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
'ISYSTEM_CXX_include_paths' :
'${BASE_USR}/include/c++/${STDLIB_IDIR} ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/arm-none-linux-gnueabi ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/backward',
# Only propagate opt level to linker if explicitly set, so that the
# linker will know if an opt level was explicitly set or not.
'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} -static ' +
'${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
'--pnacl-exceptions=${CXX_EH_MODE}',
'SEARCH_DIRS' : '', # Directories specified using -L
# Library Strings
'EMITMODE' : '${!USE_STDLIB ? nostdlib : static}',
# This is setup so that LD_ARGS_xxx is evaluated lazily.
'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
# ${ld_inputs} signifies where to place the objects and libraries
# provided on the command-line.
'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
'LD_ARGS_static':
'${CXX_EH_MODE==zerocost ? -l:crt1_for_eh.x : -l:crt1.x} ' +
'-l:crti.bc -l:crtbegin.bc '
'${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
'${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
'${ld_inputs} ' +
'--start-group ${STDLIBS} --end-group',
'LLVM_PASSES_TO_DISABLE': '',
# Flags for translating to native .o files.
'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
'STDLIBS' : '${DEFAULTLIBS ? '
'${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} ${LIBPNACLMM}}',
'LIBSTDCPP' : '${IS_CXX ? -l${STDLIB_TRUNC} -lm }',
'LIBC' : '-lc',
'LIBNACL' : '-lnacl',
'LIBPNACLMM': '-lpnaclmm',
# Enabled/disabled by -pthreads
'LIBPTHREAD': '${PTHREAD ? -lpthread}',
# IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
'RUN_CC': '${CC} -emit-llvm ${mode} ${CC_FLAGS} ' +
'${@AddPrefix:-isystem :ISYSTEM} ' +
'-x${typespec} "${infile}" -o ${output}',
}
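# Illustrative invocations of this driver (a sketch; the exact flag handling
# is defined by CustomPatterns and GCCPatterns below):
#   pnacl-clang -c hello.c -o hello.po
#   pnacl-clang++ --pnacl-exceptions=sjlj hello.po -o hello.pexe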
def AddLLVMPassDisableFlag(*args):
env.append('LLVM_PASSES_TO_DISABLE', *args)
env.append('LD_FLAGS', *args)
def AddLDFlag(*args):
env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
# pass translator args to ld in case we go all the way to .nexe
env.append('LD_FLAGS', *['-Wt,' + a for a in args])
# pass translator args to translator in case we go to .o
env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
env.append('CC_FLAGS', *args)
env.set('DIAGNOSTIC', '1')
def SetTarget(*args):
arch = ParseTriple(args[0])
env.set('FRONTEND_TRIPLE', args[0])
AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
"""Set the C++ Standard Library."""
lib = args[0]
assert lib == 'libc++' or lib == 'libstdc++', (
'Invalid C++ standard library: -stdlib=%s' % lib)
env.set('STDLIB', lib)
env.set('STDLIB_TRUNC', lib[3:])
if lib == 'libc++':
env.set('STDLIB_IDIR', 'v1')
if env.getbool('IS_CXX'):
# libc++ depends on pthread for C++11 features as well as some
# exception handling (which may get removed later by the PNaCl ABI
# simplification) and initialize-once.
env.set('PTHREAD', '1')
elif lib == 'libstdc++':
env.set('STDLIB_IDIR', '4.6.2')
def IsPortable():
return env.getone('FRONTEND_TRIPLE').startswith('le32-')
stdin_count = 0
def AddInputFileStdin():
global stdin_count
# When stdin is an input, -x or -E must be given.
forced_type = filetype.GetForcedFileType()
if not forced_type:
# Only allowed if -E is specified.
forced_type = 'c'
env.set('NEED_DASH_E', '1')
stdin_name = '__stdin%d__' % stdin_count
env.append('INPUTS', stdin_name)
filetype.ForceFileType(stdin_name, forced_type)
stdin_count += 1
def IsStdinInput(f):
return f.startswith('__stdin') and f.endswith('__')
def HandleDashX(arg):
if arg == 'none':
filetype.SetForcedFileType(None)
return
filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
env.set('SHOW_VERSION', '1')
AddDiagnosticFlag(*args)
def AddBPrefix(prefix):
""" Add a path to the list searched for host binaries and include dirs. """
AddHostBinarySearchPath(prefix)
prefix = pathtools.normalize(prefix)
if pathtools.isdir(prefix) and not prefix.endswith('/'):
prefix += '/'
# Add prefix/ to the library search dir if it exists
if pathtools.isdir(prefix):
env.append('SEARCH_DIRS', prefix)
# Add prefix/include to isystem if it exists
include_dir = prefix + 'include'
if pathtools.isdir(include_dir):
env.append('ISYSTEM_USER', include_dir)
CustomPatterns = [
( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
( '--pnacl-frontend-triple=(.+)', SetTarget),
( ('-target','(.+)'), SetTarget),
( ('--target=(.+)'), SetTarget),
( '--pnacl-exceptions=(none|sjlj|zerocost)', "env.set('CXX_EH_MODE', $0)"),
# TODO(mseaborn): Remove "--pnacl-allow-exceptions", which is
# superseded by "--pnacl-exceptions".
( '--pnacl-allow-exceptions', "env.set('CXX_EH_MODE', 'zerocost')"),
( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
( '(--pnacl-disable-abi-check)', AddLDFlag),
( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
( '(--pnacl-allow-dev-intrinsics)', AddLDFlag),
]
GCCPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-E', "env.set('GCC_MODE', '-E')"),
( '-S', "env.set('GCC_MODE', '-S')"),
( '-c', "env.set('GCC_MODE', '-c')"),
( '-allow-asm', "env.set('NO_ASM', '0')"),
( '-nostdinc', "env.set('STDINC', '0')"),
( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
( '-?-stdlib=(.*)', SetStdLib),
( ('-?-stdlib', '(.*)'), SetStdLib),
# Flags to pass to native linker
( '(-Wn,.*)', AddLDFlag),
( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
# Flags to pass to pnacl-translate
( '-Wt,(.*)', AddTranslatorFlag),
( ('-Xtranslator','(.*)'), AddTranslatorFlag),
# We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
( '-fPIC', "env.set('PIC', '1')"),
# We must include -l, -Xlinker, and -Wl options into the INPUTS
  # in the order they appeared. This is exactly the behavior of gcc.
# For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
#
( '(-l.+)', "env.append('INPUTS', $0)"),
( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
( '(-Wl,.*)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
( '-O', "env.set('OPT_LEVEL', '1')\n"),
( ('-isystem', '(.*)'),
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( '-isystem(.+)',
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
# NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
# Clang just says: ignoring nonexistent directory "=DIR"
( ('-iquote', '(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-iquote(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-idirafter', '(.+)'),
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( '-idirafter(.+)',
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( ('(-include)','(.+)'), AddCCFlag),
( ('(-include.+)'), AddCCFlag),
( '(-g)', AddCCFlag),
( '(-W.*)', AddCCFlag),
( '(-w)', AddCCFlag),
( '(-std=.*)', AddCCFlag),
( '(-ansi)', AddCCFlag),
( ('(-D)','(.*)'), AddCCFlag),
( '(-D.+)', AddCCFlag),
( ('(-U)','(.*)'), AddCCFlag),
( '(-U.+)', AddCCFlag),
( '(-f.*)', AddCCFlag),
( '(-pedantic)', AddCCFlag),
( '(-pedantic-errors)', AddCCFlag),
( '(-g.*)', AddCCFlag),
( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
"env.set('VERBOSE', '1')"),
( '(-pthreads?)', "env.set('PTHREAD', '1')"),
# No-op: accepted for compatibility in case build scripts pass it.
( '-static', ""),
( ('-B','(.*)'), AddBPrefix),
( ('-B(.+)'), AddBPrefix),
( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '(-Wp,.*)', AddCCFlag),
( '(-Xpreprocessor .*)', AddCCFlag),
( ('(-Xclang)', '(.*)'), AddCCFlag),
# Accept and ignore default flags
( '-m32', ""),
( '-emit-llvm', ""),
( '(-MG)', AddCCFlag),
( '(-MMD)', AddCCFlag),
( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
"env.set('GCC_MODE', '-E')"),
( '(-MP)', AddCCFlag),
( ('(-MQ)','(.*)'), AddCCFlag),
( '(-MD)', AddCCFlag),
( ('(-MT)','(.*)'), AddCCFlag),
( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
( ('-x', '(.+)'), HandleDashX),
( '-x(.+)', HandleDashX),
( ('(-mllvm)', '(.+)'), AddCCFlag),
# Ignore these gcc flags
( '(-msse)', ""),
( '(-march=armv7-a)', ""),
( '(-pipe)', ""),
( '(-s)', AddLDFlag),
( '(--strip-all)', AddLDFlag),
( '(--strip-debug)', AddLDFlag),
# Ignore these assembler flags
( '(-Qy)', ""),
( ('(--traditional-format)', '.*'), ""),
( '(-gstabs)', ""),
( '(--gstabs)', ""),
( '(-gdwarf2)', ""),
( '(--gdwarf2)', ""),
( '(--fatal-warnings)', ""),
( '(-meabi=.*)', ""),
( '(-mfpu=.*)', ""),
( '(-mfloat-abi=.+)', AddCCFlag),
# GCC diagnostic mode triggers
( '(-print-.*)', AddDiagnosticFlag),
( '(--print.*)', AddDiagnosticFlag),
( '(-dumpspecs)', AddDiagnosticFlag),
( '(--version)', AddVersionFlag),
# These are preprocessor flags which should be passed to the frontend, but
# should not prevent the usual -i flags (which DIAGNOSTIC mode does)
( '(-d[DIMNU])', AddCCFlag),
( '(-d.*)', AddDiagnosticFlag),
# Catch all other command-line arguments
( '(-.+)', "env.append('UNMATCHED', $0)"),
# Standard input
( '-', AddInputFileStdin),
# Input Files
# Call ForceFileType for all input files at the time they are
# parsed on the command-line. This ensures that the gcc "-x"
# setting is correctly applied.
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
"filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
if not env.has('IS_CXX'):
Log.Fatal('"pnacl-driver" cannot be used directly. '
'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
output_type_map = {
('-E', False) : 'pp',
('-E', True) : 'pp',
('-c', False) : 'po',
('-c', True) : 'o',
('-S', False) : 'll',
('-S', True) : 's',
('', False) : 'pexe',
('', True) : 'nexe',
}
return output_type_map[(driver_flag, compiling_to_native)]
def ReadDriverRevision():
rev_file = env.getone('DRIVER_REV_FILE')
# Might be an SVN version or a GIT hash (depending on the NaCl src client)
nacl_ver = DriverOpen(rev_file, 'rb').readlines()[0]
m = re.search(r'\[SVN\].*/native_client:\s*(\d+)', nacl_ver)
if m:
return m.group(1)
m = re.search(r'\[GIT\].*/native_client.git:\s*(\w+)', nacl_ver)
if m:
return m.group(1)
# fail-fast: if the REV file exists but regex search failed,
# we need to fix the regex to get nacl-version.
if not m:
Log.Fatal('Failed to parse REV file to get nacl-version.')
def main(argv):
env.update(EXTRA_ENV)
CheckSetup()
ParseArgs(argv, CustomPatterns + GCCPatterns)
# "configure", especially when run as part of a toolchain bootstrap
# process, will invoke gcc with various diagnostic options and
# parse the output. In these cases we do not alter the incoming
# commandline. It is also important to not emit spurious messages.
if env.getbool('DIAGNOSTIC'):
if env.getbool('SHOW_VERSION'):
code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
redirect_stdout=subprocess.PIPE)
out = stdout.split('\n')
nacl_version = ReadDriverRevision()
out[0] += ' nacl-version=%s' % nacl_version
stdout = '\n'.join(out)
print stdout,
else:
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
unmatched = env.get('UNMATCHED')
if len(unmatched) > 0:
UnrecognizedOption(*unmatched)
# If -arch was given, we are compiling directly to native code
compiling_to_native = GetArch() is not None
if env.getbool('ALLOW_NATIVE') and not compiling_to_native:
Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
if not env.get('STDLIB'):
# Default C++ Standard Library.
SetStdLib('libc++')
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if len(inputs) == 0:
if env.getbool('VERBOSE'):
# -v can be invoked without any inputs. Runs the original
# command without modifying the commandline for this case.
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
else:
Log.Fatal('No input files')
gcc_mode = env.getone('GCC_MODE')
output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
needs_linking = (gcc_mode == '')
if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
Log.Fatal("-E or -x required when input is from stdin")
# There are multiple input files and no linking is being done.
# There will be multiple outputs. Handle this case separately.
if not needs_linking:
# Filter out flags
inputs = [f for f in inputs if not IsFlag(f)]
if output != '' and len(inputs) > 1:
Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
repr(inputs))
for f in inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if not filetype.IsSourceType(intype):
if ((output_type == 'pp' and intype != 'S') or
(output_type == 'll') or
(output_type == 'po' and intype != 'll') or
(output_type == 's' and intype not in ('ll','po','S')) or
(output_type == 'o' and intype not in ('ll','po','S','s'))):
Log.Fatal("%s: Unexpected type of file for '%s'",
pathtools.touser(f), gcc_mode)
if output == '':
f_output = DefaultOutputName(f, output_type)
else:
f_output = output
namegen = TempNameGen([f], f_output)
CompileOne(f, output_type, namegen, f_output)
return 0
# Linking case
assert(needs_linking)
assert(output_type in ('pso','so','pexe','nexe'))
if output == '':
output = pathtools.normalize('a.out')
namegen = TempNameGen(inputs, output)
# Compile all source files (c/c++/ll) to .po
for i in xrange(0, len(inputs)):
if IsFlag(inputs[i]):
continue
intype = filetype.FileType(inputs[i])
if filetype.IsSourceType(intype) or intype == 'll':
inputs[i] = CompileOne(inputs[i], 'po', namegen)
# Compile all .s/.S to .o
if env.getbool('ALLOW_NATIVE'):
for i in xrange(0, len(inputs)):
if IsFlag(inputs[i]):
continue
intype = filetype.FileType(inputs[i])
if intype in ('s','S'):
inputs[i] = CompileOne(inputs[i], 'o', namegen)
# We should only be left with .po and .o and libraries
for f in inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if intype in ('o','s','S') or filetype.IsNativeArchive(f):
if not env.getbool('ALLOW_NATIVE'):
Log.Fatal('%s: Native object files not allowed in link. '
'Use --pnacl-allow-native to override.', pathtools.touser(f))
assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
# Fix the user-specified linker arguments
ld_inputs = []
for f in inputs:
if f.startswith('-Xlinker='):
ld_inputs.append(f[len('-Xlinker='):])
elif f.startswith('-Wl,'):
ld_inputs += f[len('-Wl,'):].split(',')
else:
ld_inputs.append(f)
if env.getbool('ALLOW_NATIVE'):
ld_inputs.append('--pnacl-allow-native')
# Invoke the linker
env.set('ld_inputs', *ld_inputs)
ld_args = env.get('LD_ARGS')
ld_flags = env.get('LD_FLAGS')
RunDriver('ld', ld_flags + ld_args + ['-o', output])
return 0
def IsFlag(f):
return f.startswith('-')
def CompileOne(infile, output_type, namegen, output = None):
if output is None:
output = namegen.TempNameForInput(infile, output_type)
chain = DriverChain(infile, output, namegen)
SetupChain(chain, filetype.FileType(infile), output_type)
chain.run()
return output
def RunCC(infile, output, mode):
intype = filetype.FileType(infile)
typespec = filetype.FileTypeToGCCType(intype)
include_cxx_headers = (env.get('LANGUAGE') == 'CXX') or (intype == 'c++')
env.setbool('INCLUDE_CXX_HEADERS', include_cxx_headers)
if IsStdinInput(infile):
infile = '-'
RunWithEnv("${RUN_CC}", infile=infile, output=output,
mode=mode,
typespec=typespec)
def RunLLVMAS(infile, output):
if IsStdinInput(infile):
infile = '-'
# This is a bitcode only step - so get rid of "-arch xxx" which
# might be inherited from the current invocation
RunDriver('as', [infile, '-o', output], suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
if IsStdinInput(infile):
infile = '-'
RunDriver('as', [infile, '-o', output])
def RunTranslate(infile, output, mode):
if not env.getbool('ALLOW_TRANSLATE'):
Log.Fatal('%s: Trying to convert bitcode to an object file before '
'bitcode linking. This is supposed to wait until '
'translation. Use --pnacl-allow-translate to override.',
pathtools.touser(infile))
args = env.get('TRANSLATE_FLAGS') + [mode, '--allow-llvm-bitcode-input',
infile, '-o', output]
if env.getbool('PIC'):
args += ['-fPIC']
RunDriver('translate', args)
def RunOpt(infile, outfile, pass_list):
filtered_list = [pass_option for pass_option in pass_list
if pass_option not in env.get('LLVM_PASSES_TO_DISABLE')]
RunDriver('opt', filtered_list + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
assert(output_type in ('pp','ll','po','s','o'))
cur_type = input_type
# source file -> pp
if filetype.IsSourceType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# source file -> ll
if (filetype.IsSourceType(cur_type) and
(env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
chain.add(RunCC, 'll', mode='-S')
cur_type = 'll'
if cur_type == output_type:
return
# ll -> po
if cur_type == 'll':
chain.add(RunLLVMAS, 'po')
cur_type = 'po'
if cur_type == output_type:
return
  # source file -> po (we also force native output to go through this phase)
if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
chain.add(RunCC, 'po', mode='-c')
cur_type = 'po'
if cur_type == output_type:
return
# po -> o
if (cur_type == 'po' and output_type == 'o'):
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 'o', mode='-c')
cur_type = 'o'
if cur_type == output_type:
return
# po -> s
if cur_type == 'po':
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 's', mode='-S')
cur_type = 's'
if cur_type == output_type:
return
# S -> s
if cur_type == 'S':
chain.add(RunCC, 's', mode='-E')
cur_type = 's'
if output_type == 'pp':
return
if cur_type == output_type:
return
# s -> o
if cur_type == 's' and output_type == 'o':
chain.add(RunNativeAS, 'o')
cur_type = 'o'
if cur_type == output_type:
return
Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
tool = env.getone('SCRIPT_NAME')
if '--help-full' in argv:
# To get ${CC}, etc.
env.update(EXTRA_ENV)
code, stdout, stderr = Run('"${CC}" -help',
redirect_stdout=subprocess.PIPE,
redirect_stderr=subprocess.STDOUT,
errexit=False)
return stdout
else:
return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
-o <file> Output to <file>.
-E Only run the preprocessor.
-S Generate bitcode assembly.
-c Generate bitcode object.
-I <dir> Add header search path.
-L <dir> Add library search path.
-D<key>[=<val>] Add definition for the preprocessor.
-W<id> Toggle warning <id>.
-f<feature> Enable <feature>.
-Wl,<arg> Pass <arg> to the linker.
-Xlinker <arg> Pass <arg> to the linker.
-Wt,<arg> Pass <arg> to the translator.
-Xtranslator <arg> Pass <arg> to the translator.
-Wp,<arg> Pass <arg> to the preprocessor.
-Xpreprocessor,<arg> Pass <arg> to the preprocessor.
-x <language> Treat subsequent input files as having type <language>.
-static Produce a static executable (the default).
-Bstatic Link subsequent libraries statically.
-Bdynamic Link subsequent libraries dynamically.
-fPIC Ignored (only used by translator backend)
(accepted for compatibility).
-pipe Ignored (for compatibility).
  -O<n>                 Optimization level <n>: 0, 1, 2, 3, 4 or s.
-g Generate complete debug information.
-gline-tables-only Generate debug line-information only
(allowing for stack traces).
-flimit-debug-info Generate limited debug information.
-save-temps Keep intermediate compilation results.
-v Verbose output / show commands.
-h | --help Show this help.
--help-full Show underlying clang driver's help message
(warning: not all options supported).
""" % (tool)
| gpl-3.0 |
soar-telescope/goodman | goodman_pipeline/spectroscopy/wavelength.py | 2 | 32546 | # -*- coding: utf8 -*-
"""Contains the tools to produce a wavelength solution
This module gets the extracted data to produce a wavelength solution, linearize
the spectrum and write the solution to the image's header following the FITS
standard.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import json
import logging
import os
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
from astropy.stats import sigma_clip
from ccdproc import CCDData
from matplotlib.backends.backend_pdf import PdfPages
from ..wcs.wcs import WCS
from ..core import (add_linear_wavelength_solution,
bin_reference_data,
cross_correlation,
evaluate_wavelength_solution,
get_lines_in_lamp,
linearize_spectrum,
write_fits)
from ..core import (ReferenceData, NoMatchFound)
log = logging.getLogger(__name__)
class WavelengthCalibration(object):
"""Wavelength Calibration Class
The WavelengthCalibration class is instantiated for each of the science
images, which are treated as a "science object". In this first release it
can find a wavelength solution for a given comparison lamp using an
    interactive GUI based on Matplotlib. Although it works very well, there
    is a plan for the next release to create an independent GUI based on Qt
    in order to work better with different screen sizes and to handle other
    topics such as showing warnings, messages and help.
    This class takes a 1D spectrum with no wavelength calibration and returns
    FITS files with wavelength solutions using the FITS standard for linear
    solutions. Goodman spectra are slightly non-linear, therefore they are
    linearized and smoothed before they are returned to the user.
"""
def __init__(self):
"""Wavelength Calibration Class Initialization
A WavelengthCalibration class is instantiated for each science target
being processed, i.e. every science image.
Notes:
            This class violates some conventions as far as length and number
            of attributes are concerned. Solving this is part of the priority
            plans for the next release.
Args:
args (Namespace): Runtime arguments.
"""
self.poly_order = 3
self.wcs = WCS()
self.wsolution = None
self.wcal_lamp_file = None
self.sci_target_file = None
self.n_points = None
self.n_rejections = None
self.rms_error = None
self.cross_corr_tolerance = 5
self.reference_data_dir = None
self.reference_data = None
self.calibration_lamp = ''
self.wcal_lamp_file = ''
# Instrument configuration and spectral characteristics
self.serial_binning = None
self.parallel_binning = None
def __call__(self,
ccd,
comp_list,
save_data_to,
reference_data,
object_number=None,
corr_tolerance=15,
output_prefix='w',
plot_results=False,
save_plots=False,
plots=False,
json_output=False):
"""Call method for the WavelengthSolution Class
It takes extracted data and produces wavelength calibrated 1D FITS file.
The call method takes care of the order and logic needed to call the
different methods. A wavelength solution can be recycled for the next
science object. In that case, the wavelength solution is parsed as an
argument and then there is no need to calculate it again. The recycling
part has to be implemented in the caller function.
Args:
ccd (CCDData) a :class:`~astropy.nddata.CCDData` instance
comp_list (list): Comparison lamps for the science target that will
be processed here. Every element of this list is an instance of
:class:`~astropy.nddata.CCDData`.
save_data_to (str): Path to save processed data.
object_number (int): In case of multiple detections in a single
image this number will be added as a suffix before `.fits` in
order to allow for multiple 1D files. Default value is None.
corr_tolerance (int): `cross_corr_tolerance` stands for cross
correlation tolerance, in other words, how far the cross
correlation can be from the global cross correlation. It usually
increases with the frequency of the grating.
output_prefix (str): Prefix to add to files.
plot_results (bool): Present a plot showing the wavelength
calibrated data.
save_plots (bool): Save any plot shown. They are saved under
`<path>/<save_data_to>/plots/` where `<path>/<save_data_to>` is
the full path to the folder that `save_data_to` is pointing.
plots (bool): Show plots during operation.
Returns:
wavelength_solution (object): The mathematical model of the
wavelength solution. If it fails to create it will return a
None element.
"""
assert isinstance(ccd, CCDData)
assert isinstance(comp_list, list)
json_payload = {'wavelength_solution': [],
'warning': '',
'error': ''}
if os.path.isdir(reference_data):
if self.reference_data_dir != reference_data:
self.reference_data_dir = reference_data
self.reference_data = ReferenceData(
reference_dir=self.reference_data_dir)
self.cross_corr_tolerance = corr_tolerance
self.sci_target_file = ccd.header['GSP_FNAM']
self.i_fig = None
log.info('Processing Science Target: '
'{:s}'.format(ccd.header['OBJECT']))
if len(comp_list) == 0:
log.warning("No comparison lamps were provided for file {}"
"".format(self.sci_target_file))
log.error("Ending processing of {}".format(self.sci_target_file))
if json_output:
                json_payload['error'] = 'Unable to process without reference lamps'
return json_payload
else:
return
else:
wavelength_solutions = []
reference_lamp_names = []
for self.lamp in comp_list:
self.calibration_lamp = self.lamp.header['GSP_FNAM']
log.info('Using reference lamp {}'.format(self.calibration_lamp))
self.raw_pixel_axis = range(self.lamp.shape[0])
self.lamp_name = self.lamp.header['OBJECT']
log.info('Processing Comparison Lamp: '
'{:s}'.format(self.lamp_name))
self.lines_center = get_lines_in_lamp(
ccd=self.lamp, plots=plots)
try:
self._automatic_wavelength_solution(
save_data_to=save_data_to,
corr_tolerance=self.cross_corr_tolerance)
except NoMatchFound as message:
raise NoMatchFound(message)
if self.wsolution is not None:
ccd.header.set('GSP_WRMS', value=self.rms_error)
ccd.header.set('GSP_WPOI', value=self.n_points)
ccd.header.set('GSP_WREJ', value=self.n_rejections)
linear_x_axis, self.lamp.data = linearize_spectrum(
self.lamp.data,
wavelength_solution=self.wsolution)
self.lamp = self.wcs.write_gsp_wcs(ccd=self.lamp,
model=self.wsolution)
self.lamp = add_linear_wavelength_solution(
ccd=self.lamp,
x_axis=linear_x_axis,
reference_lamp=self.calibration_lamp)
self.wcal_lamp_file = self._save_wavelength_calibrated(
ccd=self.lamp,
original_filename=self.calibration_lamp,
save_data_to=save_data_to,
output_prefix=output_prefix,
index=object_number,
lamp=True)
wavelength_solutions.append(self.wsolution)
reference_lamp_names.append(self.wcal_lamp_file)
else:
log.error('It was not possible to get a wavelength '
'solution from lamp '
'{:s} {:s}.'.format(
self.lamp.header['GSP_FNAM'],
self.lamp.header['OBJECT']))
continue
if len(wavelength_solutions) > 1:
                warning_message = str("The current version of the pipeline "
                                      "does not combine multiple solutions; "
                                      "instead it saves a separate version "
                                      "of the science file for each "
                                      "wavelength solution calculated.")
log.warning(warning_message)
all_solution_info = []
for i in range(len(wavelength_solutions)):
# TODO (simon): Combine Multiple solutions
self.wsolution = wavelength_solutions[i]
self.wcal_lamp_file = reference_lamp_names[i]
ccd = self.wcs.write_gsp_wcs(ccd=ccd, model=self.wsolution)
saved_file_name = self._save_science_data(
ccd=ccd,
wavelength_solution=self.wsolution,
save_to=save_data_to,
index=i + 1,
plot_results=plot_results,
save_plots=save_plots,
plots=plots)
all_solution_info.append({
'solution_info': {'rms_error': "{:.4f}".format(self.rms_error),
'npoints': "{:d}".format(self.n_points),
'nrjections': "{:d}".format(self.n_rejections)},
'file_name': saved_file_name,
'reference_lamp': self.wcal_lamp_file})
if json_output:
json_payload['warning'] = warning_message
json_payload['wavelength_solution'] = all_solution_info
return json_payload
elif len(wavelength_solutions) == 1:
self.wsolution = wavelength_solutions[0]
self.wcal_lamp_file = reference_lamp_names[0]
ccd = self.wcs.write_gsp_wcs(ccd=ccd, model=self.wsolution)
saved_file_name = self._save_science_data(
ccd=ccd,
wavelength_solution=self.wsolution,
save_to=save_data_to,
plot_results=plot_results,
save_plots=save_plots,
index=object_number,
plots=plots)
if json_output:
json_payload['wavelength_solution'] = [
{'solution_info': {'rms_error': "{:.4f}".format(self.rms_error),
'npoints': "{:d}".format(self.n_points),
'nrjections': "{:d}".format(self.n_rejections)},
'file_name': saved_file_name,
'reference_lamp': self.wcal_lamp_file}]
return json_payload
else:
log.error("No wavelength solution.")
if json_output:
json_payload['error'] = "Unable to obtain wavelength solution"
return json_payload
def _automatic_wavelength_solution(self,
save_data_to,
corr_tolerance=15,
plot_results=False,
save_plots=False,
plots=False):
"""Finds a Wavelength Solution Automatically
        This method uses a library of previously wavelength-calibrated
        comparison lamps. A reference lamp is used only if it exactly matches
        the lamp being processed. A workflow summary is presented below:
- Identify the exactly matching reference comparison lamp. If it
doesn't exist it will return None. If it does exist the reference
lamp will be loaded and it's wavelength solution read.
- Identify lines in the new lamp, the lamp data has been already
loaded at the initialization of the class
- According to the lines detected it will split both spectrum in the
same number of pieces and same respective sizes and then will do
cross correlation for each of them.
- The line's pixel value is stored
- Using the reference lamp's wavelength solution mathematical model,
the corresponding value in angstrom is calculated using the offset
obtained from the cross correlation something like this:
angstrom = model(pixel + offset)
- As a first order filter one-iteration of a two-sigma clipping is
applied to the cross-correlation offsets, this is necessary to
eliminate mismatched lines.
- A new wavelength solution is calculated using the points collected
above.
- Using the Angstrom values previously found and the detected lines
plus the newly calculated solution, the differences in angstrom are
calculated to which values a new sigma-clipping is applied, again
one iteration two-sigmas, since the distributions are not
necessarily normal distributions.
- Once these values are cleaned of rejected values the final solution
is calculated.
Returns:
None in case it is not possible to find a suitable template lamp or
if is not possible to calculate the solution.
"""
try:
reference_lamp_ccd = self.reference_data.get_reference_lamp(
header=self.lamp.header)
log.debug('Found reference lamp: '
'{:s}'.format(reference_lamp_ccd.header['GSP_FNAM']))
except NoMatchFound as error:
raise NoMatchFound(error)
except NotImplementedError as error:
            raise NotImplementedError(error)
# TODO (simon): Evaluate possibility to read iraf wcs. [#304]
reference_lamp_wav_axis, reference_lamp_ccd.data = \
self.wcs.read_gsp_wcs(ccd=reference_lamp_ccd)
self.serial_binning, self.parallel_binning = [
int(x) for x in self.lamp.header['CCDSUM'].split()]
if self.serial_binning != 1:
reference_lamp_wav_axis, reference_lamp_ccd.data = \
bin_reference_data(wavelength=reference_lamp_wav_axis,
intensity=reference_lamp_ccd.data,
serial_binning=self.serial_binning)
self.wcs.binning = self.serial_binning
'''detect lines in comparison lamp (not reference)'''
lamp_lines_pixel = get_lines_in_lamp(ccd=self.lamp,
plots=plots)
lamp_lines_angst = self.wcs.model(lamp_lines_pixel)
pixel_values = []
angstrom_values = []
correlation_values = []
angstrom_differences = []
log.debug('Length {:d}'.format(len(self.lamp.data)))
log.debug('NLines {:d}'.format(len(lamp_lines_pixel)))
log.debug('Length / NLines {:.3f}'.format(
len(self.lamp.data) / float(len(lamp_lines_pixel))))
slit_size = float(re.sub('["A-Za-z_ ]', '', self.lamp.header['SLIT']))
global_cross_corr = cross_correlation(
reference=reference_lamp_ccd.data,
compared=self.lamp.data,
slit_size=slit_size,
serial_binning=self.serial_binning)
half_width = np.max(
[int((len(self.lamp.data) / float(len(lamp_lines_pixel)))),
4 * global_cross_corr])
for i in range(len(lamp_lines_pixel)):
line_value_pixel = lamp_lines_pixel[i]
line_value_angst = lamp_lines_angst[i]
xmin = int(max(0, round(line_value_pixel - half_width)))
xmax = int(min(round(line_value_pixel + half_width),
len(self.lamp.data)))
if xmin >= xmax:
continue
# print(xmin, xmax, self.lamp.data.size)
# TODO (simon): Convolve to match wider lines such as those from
            # TODO (cont): the slit of 5 arcseconds
ref_sample = reference_lamp_ccd.data[xmin:xmax]
# ref_wavele = reference_lamp_wav_axis[xmin:xmax]
lamp_sample = self.lamp.data[xmin:xmax]
slit_size = float(re.sub('["A-Za-z_ ]', '', self.lamp.header['SLIT']))
correlation_value = cross_correlation(
reference=ref_sample,
compared=lamp_sample,
slit_size=slit_size,
serial_binning=self.serial_binning)
log.debug('Cross correlation value '
'{:s} vs {:s}'.format(str(global_cross_corr),
str(correlation_value)))
if - corr_tolerance < (global_cross_corr - correlation_value) < \
corr_tolerance:
"""record value for reference wavelength"""
# print(global_cross_corr - correlation_value)
angstrom_value_model = self.wcs.model(
line_value_pixel + correlation_value)
# print(correlation_value, angstrom_value_model)
correlation_values.append(correlation_value)
angstrom_differences.append(angstrom_value_model -
line_value_angst)
angstrom_values.append(angstrom_value_model)
# print(angstrom_values)
pixel_values.append(line_value_pixel)
else:
log.debug("Local cross correlation value {:.3f} is too far "
"from {:.3f}".format(correlation_value,
global_cross_corr))
if plots: # pragma: no cover
# print(global_cross_corr, correlation_value)
plt.ion()
plt.title('Samples after cross correlation\n Shift {:.3f}'
''.format(correlation_value))
plt.xlabel('Pixel Axis')
plt.ylabel('Intensity')
plt.plot(ref_sample,
color='k',
label='Reference Sample')
plt.plot([x + correlation_value for x in
range(len(lamp_sample))],
lamp_sample,
label='New Lamp Sample')
plt.legend(loc='best')
plt.draw()
plt.pause(1)
plt.clf()
plt.ioff()
# This is good and necessary as a first approach for some very wrong
# correlation results
clipped_values = sigma_clip(correlation_values,
sigma=3,
maxiters=1,
cenfunc=np.ma.median)
# print(clipped_values)
if np.ma.is_masked(clipped_values):
_pixel_values = list(pixel_values)
_angstrom_values = list(angstrom_values)
# print(_angstrom_values)
pixel_values = []
angstrom_values = []
for i in range(len(clipped_values)):
if clipped_values[i] is not np.ma.masked:
pixel_values.append(_pixel_values[i])
# print(_angstrom_values[i][0])
angstrom_values.append(_angstrom_values[i])
# Create a wavelength solution
log.info('Creating Wavelength Solution')
self.wsolution = self.wcs.fit(physical=pixel_values,
wavelength=angstrom_values,
model_name='chebyshev',
degree=self.poly_order)
if self.wsolution is None:
log.error('Failed to find wavelength solution using reference '
'file: {:s}'.format(self.calibration_lamp))
return None
# finding differences in order to improve the wavelength solution
wavelength_differences = [angstrom_values[i] -
self.wsolution(pixel_values[i]) for i in
range(len(pixel_values))]
clipped_differences = sigma_clip(wavelength_differences,
sigma=2,
maxiters=3,
cenfunc=np.ma.median)
if np.ma.is_masked(clipped_differences):
log.debug('Cleaning pixel to angstrom match to improve '
'wavelength solution')
_pixel_values = list(pixel_values)
_angstrom_values = list(angstrom_values)
pixel_values = []
angstrom_values = []
for i in range(len(clipped_differences)):
if clipped_differences[i] is not np.ma.masked:
pixel_values.append(_pixel_values[i])
angstrom_values.append(_angstrom_values[i])
log.info('Re-fitting wavelength solution')
self.wsolution = self.wcs.fit(physical=pixel_values,
wavelength=angstrom_values,
model_name='chebyshev',
degree=self.poly_order)
self.rms_error, self.n_points, self.n_rejections = \
evaluate_wavelength_solution(
clipped_differences=clipped_differences)
if plot_results or plots or \
save_plots: # pragma: no cover
plt.close('all')
plt.switch_backend('Qt5Agg')
# print(self.i_fig)
self.i_fig = None
if self.i_fig is None:
self.i_fig = plt.figure()
self.i_fig.canvas.set_window_title(
'Automatic Wavelength Solution')
self.ax1 = self.i_fig.add_subplot(111)
self.ax1.set_rasterization_zorder(1)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not plots:
plt.ion()
# plt.show()
else:
plt.ioff()
self.ax1.plot([], color='m', label='Pixels')
self.ax1.plot([], color='c', label='Angstrom')
for val in pixel_values:
self.ax1.axvline(self.wsolution(val), color='m', zorder=0)
for val2 in angstrom_values:
self.ax1.axvline(val2, color='c', linestyle='--', zorder=0)
self.ax1.plot(reference_lamp_wav_axis,
reference_lamp_ccd.data,
label='Reference',
color='k',
alpha=1, zorder=0)
self.ax1.plot(self.wsolution(self.raw_pixel_axis),
self.lamp.data,
label='Last Solution',
color='r',
alpha=0.7, zorder=0)
try:
wavmode = self.lamp.header['wavmode']
except KeyError as error:
log.debug(error)
wavmode = ''
self.ax1.set_xlabel('Wavelength (Angstrom)')
self.ax1.set_ylabel('Intensity (ADU)')
self.ax1.set_title('Automatic Wavelength Solution\n'
+ self.lamp.header['OBJECT']
+ ' ' + wavmode + '\n'
+ 'RMS Error: {:.3f}'.format(self.rms_error))
self.ax1.legend(loc='best')
self.i_fig.tight_layout()
if save_plots:
plots_path = os.path.join(save_data_to, 'plots')
if not os.path.isdir(plots_path):
os.path.os.makedirs(plots_path)
# saves pdf files of the wavelength solution plot
out_file_name = 'automatic-solution_' + self.lamp.header[
'GSP_FNAM']
out_file_name = re.sub('.fits', '', out_file_name)
file_count = len(glob.glob(
os.path.join(save_data_to,
out_file_name + '*'))) + 1
out_file_name += '_RMS_{:.3f}_{:03d}.pdf'.format(self.rms_error,
file_count)
pdf_pages = PdfPages(
os.path.join(plots_path, out_file_name))
plt.savefig(pdf_pages, format='pdf')
pdf_pages.close()
plot_name = os.path.join(plots_path,
re.sub('pdf', 'png', out_file_name))
plt.savefig(plot_name, rasterized=True, format='png', dpi=300)
plt.ioff()
plt.clf()
if plots or plot_results: # pragma: no cover
manager = plt.get_current_fig_manager()
if plt.get_backend() == u'GTK3Agg':
manager.window.maximize()
elif plt.get_backend() == u'Qt5Agg':
manager.window.showMaximized()
if plots:
plt.show()
elif plot_results:
plt.draw()
plt.pause(1)
plt.ioff()
plt.close()
def _save_science_data(self,
ccd,
wavelength_solution,
save_to,
index=None,
plot_results=False,
save_plots=False,
plots=False):
"""Save wavelength calibrated data
The spectrum is linearized, then the linear solution is recorded in the
ccd's header and finally it calls the method
:func:`~wavelength.WavelengthCalibration._save_wavelength_calibrated`
which performs the actual saving to a file.
Args:
ccd (CCDData): Instance of :class:`~astropy.nddata.CCDData` with a
1D spectrum.
wavelength_solution (object): A :class:`~astropy.modeling.Model`
save_to (str): Path to save location
index (int): If there are more than one target, they are identified
by this index.
plot_results (bool): Whether to show plots or not.
save_plots (bool): Whether to save plots to files.
plots
Returns:
File name of saved file.
"""
ccd = ccd.copy()
linear_x_axis, ccd.data = linearize_spectrum(
data=ccd.data,
wavelength_solution=wavelength_solution)
ccd = add_linear_wavelength_solution(
ccd=ccd,
x_axis=linear_x_axis,
reference_lamp=self.calibration_lamp)
save_file_name = self._save_wavelength_calibrated(
ccd=ccd,
original_filename=ccd.header['GSP_FNAM'],
save_data_to=save_to,
index=index)
if plot_results or plots or save_plots: # pragma: no cover
plt.close(1)
if plot_results:
plt.ion()
# plt.show()
elif plots:
plt.ioff()
wavelength_axis = wavelength_solution(range(ccd.data.size))
object_name = ccd.header['OBJECT']
grating = ccd.header['GRATING']
fig_title = 'Wavelength Calibrated Data : ' \
'{:s}\n{:s}'.format(object_name, grating)
fig, ax1 = plt.subplots(1)
fig.canvas.set_window_title(ccd.header['GSP_FNAM'])
# ax1 = fig.add_subplot(111)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
ax1.set_title(fig_title)
ax1.set_xlabel('Wavelength (Angstrom)')
ax1.set_ylabel('Intensity (ADU)')
ax1.set_xlim((wavelength_axis[0], wavelength_axis[-1]))
# plt.close(1)
ax1.plot(wavelength_axis,
ccd.data,
color='k',
label='Data')
ax1.legend(loc='best')
fig.tight_layout()
if save_plots:
log.info('Saving plots')
plots_dir = os.path.join(save_to,
'plots')
if not os.path.isdir(plots_dir):
os.mkdir(plots_dir)
plot_name = re.sub('.fits',
'.png',
ccd.header['GSP_FNAM'])
plot_path = os.path.join(plots_dir, plot_name)
# print(plot_path)
plt.savefig(plot_path, dpi=300)
log.info('Saved plot as {:s} file '
'DPI=300'.format(plot_name))
if plots or plot_results: # pragma: no cover
manager = plt.get_current_fig_manager()
if plt.get_backend() == u'GTK3Agg':
manager.window.maximize()
elif plt.get_backend() == u'Qt5Agg':
manager.window.showMaximized()
if plots:
plt.show()
elif plot_results:
plt.draw()
plt.pause(2)
plt.ioff()
return save_file_name
def _save_wavelength_calibrated(self,
ccd,
original_filename,
save_data_to,
output_prefix='w',
index=None,
lamp=False):
if index is None:
f_end = '.fits'
else:
f_end = '_ws_{:d}.fits'.format(index)
file_full_path = os.path.join(save_data_to,
output_prefix +
original_filename.replace('.fits', f_end))
if lamp:
log.info('Wavelength-calibrated {:s} file saved to: '
'{:s} for science file {:s}'
''.format(ccd.header['OBSTYPE'],
os.path.basename(file_full_path),
self.sci_target_file))
ccd.header.set('GSP_SCTR',
value=self.sci_target_file,
after='GSP_FLAT')
else:
log.info('Wavelength-calibrated {:s} file saved to: '
'{:s} using reference lamp {:s}'
''.format(ccd.header['OBSTYPE'],
os.path.basename(file_full_path),
self.wcal_lamp_file))
ccd.header.set(
'GSP_LAMP',
value=self.wcal_lamp_file,
comment='Reference lamp used to obtain wavelength solution',
after='GSP_FLAT')
write_fits(ccd=ccd,
full_path=file_full_path,
parent_file=original_filename)
return file_full_path
if __name__ == '__main__': # pragma: no cover
sys.exit('This can not be run on its own.')
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
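        # For each class k, compute the squared Mahalanobis distance of X to
        # the class mean in the rotated/rescaled coordinates, i.e.
        # sum over features of ((X - mean_k) @ R_k * S_k ** -0.5) ** 2.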
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
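if __name__ == '__main__':
    # Illustrative sketch (an editor addition, not part of the original
    # module): re-derive the per-class discriminant used in _decision_function
    # above with plain NumPy on a tiny two-class problem, to make the
    # SVD-based scalings/rotations bookkeeping easier to follow.
    demo_rng = np.random.RandomState(0)
    X_demo = np.vstack([demo_rng.randn(20, 2), demo_rng.randn(20, 2) + 3.0])
    y_demo = np.array([0] * 20 + [1] * 20)
    demo_priors = np.array([0.5, 0.5])
    demo_scores = []
    for k in (0, 1):
        Xk = X_demo[y_demo == k]
        mean_k = Xk.mean(0)
        _, S_k, Vt_k = np.linalg.svd(Xk - mean_k, full_matrices=False)
        S2_k = (S_k ** 2) / (len(Xk) - 1)               # class covariance eigenvalues
        X2_k = np.dot(X_demo - mean_k, Vt_k.T * (S2_k ** (-0.5)))  # whitened residuals
        demo_scores.append(-0.5 * (np.sum(X2_k ** 2, 1) + np.sum(np.log(S2_k))) +
                           np.log(demo_priors[k]))
    print(np.argmax(np.array(demo_scores).T, axis=1))   # predicted class per sample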
| bsd-3-clause |
Eric89GXL/mne-python | mne/tests/test_source_space.py | 7 | 43205 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from shutil import copytree
import pytest
import scipy
import numpy as np
from numpy.testing import (assert_array_equal, assert_allclose, assert_equal,
assert_array_less)
from mne.datasets import testing
import mne
from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
setup_source_space, setup_volume_source_space,
add_source_space_distances, read_bem_surfaces,
morph_source_spaces, SourceEstimate, make_sphere_model,
head_to_mni, compute_source_morph, pick_types,
read_bem_solution, read_freesurfer_lut, read_talxfm,
read_trans)
from mne.fixes import _get_img_fdata
from mne.utils import (requires_nibabel, run_subprocess,
modified_env, requires_mne, run_tests_if_main,
check_version)
from mne.surface import _accumulate_normals, _triangle_neighbors
from mne.source_space import _get_mgz_header
from mne.source_estimate import _get_src_type
from mne.transforms import apply_trans, _get_trans
from mne.source_space import (get_volume_labels_from_aseg,
get_volume_labels_from_src,
_compare_source_spaces,
compute_distance_to_sensors)
from mne.io.pick import _picks_to_idx
from mne.io.constants import FIFF
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
aseg_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
fname_vol = op.join(subjects_dir, 'sample', 'bem',
'sample-volume-7mm-src.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem.fif')
fname_bem_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem-sol.fif')
fname_bem_3 = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem.fif')
fname_bem_3_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_fs = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')
fname_morph = op.join(subjects_dir, 'sample', 'bem',
'sample-fsaverage-ico-5-src.fif')
fname_src = op.join(
data_path, 'subjects', 'sample', 'bem', 'sample-oct-4-src.fif')
fname_fwd = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_small = op.join(base_dir, 'small-src.fif.gz')
fname_ave = op.join(base_dir, 'test-ave.fif')
rng = np.random.RandomState(0)
@testing.requires_testing_data
@pytest.mark.parametrize('picks, limits', [
('meg', (0.02, 0.250)),
(None, (0.01, 0.250)), # should be same as EEG
('eeg', (0.01, 0.250)),
])
def test_compute_distance_to_sensors(picks, limits):
"""Test computation of distances between vertices and sensors."""
src = read_source_spaces(fname_src)
fwd = mne.read_forward_solution(fname_fwd)
info = fwd['info']
trans = read_trans(trans_fname)
# trans = fwd['info']['mri_head_t']
if isinstance(picks, str):
kwargs = dict()
kwargs[picks] = True
if picks == 'eeg':
info['dev_head_t'] = None # should not break anything
use_picks = pick_types(info, **kwargs, exclude=())
else:
use_picks = picks
n_picks = len(_picks_to_idx(info, use_picks, 'data', exclude=()))
# Make sure same vertices are used in src and fwd
src[0]['inuse'] = fwd['src'][0]['inuse']
src[1]['inuse'] = fwd['src'][1]['inuse']
src[0]['nuse'] = fwd['src'][0]['nuse']
src[1]['nuse'] = fwd['src'][1]['nuse']
n_verts = src[0]['nuse'] + src[1]['nuse']
# minimum distances between vertices and sensors
depths = compute_distance_to_sensors(src, info=info, picks=use_picks,
trans=trans)
assert depths.shape == (n_verts, n_picks)
assert limits[0] * 5 > depths.min() # meaningful choice of limits
assert_array_less(limits[0], depths)
assert_array_less(depths, limits[1])
    # If the source space comes from the forward solution and trans=None
    # (i.e. identity), then depths2 should be the same as depths.
depths2 = compute_distance_to_sensors(src=fwd['src'], info=info,
picks=use_picks, trans=None)
assert_allclose(depths, depths2, rtol=1e-5)
if picks != 'eeg':
# this should break things
info['dev_head_t'] = None
with pytest.raises(ValueError,
match='Transform between meg<->head'):
compute_distance_to_sensors(src, info, use_picks, trans)
@testing.requires_testing_data
@requires_nibabel()
def test_mgz_header():
"""Test MGZ header reading."""
import nibabel
header = _get_mgz_header(fname_mri)
mri_hdr = nibabel.load(fname_mri).header
assert_allclose(mri_hdr.get_data_shape(), header['dims'])
assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
assert_allclose(mri_hdr.get_ras2vox(), np.linalg.inv(header['vox2ras']))
def _read_small_src(remove=True):
src = read_source_spaces(fname_small)
if remove:
for s in src:
s['nearest'] = None
s['nearest_dist'] = None
s['pinfo'] = None
return src
def test_add_patch_info(monkeypatch):
"""Test adding patch info to source space."""
# let's setup a small source space
src = _read_small_src(remove=False)
src_new = _read_small_src()
# test that no patch info is added for small dist_limit
add_source_space_distances(src_new, dist_limit=0.00001)
assert all(s['nearest'] is None for s in src_new)
assert all(s['nearest_dist'] is None for s in src_new)
assert all(s['pinfo'] is None for s in src_new)
# now let's use one that works (and test our warning-throwing)
with monkeypatch.context() as m:
m.setattr(mne.source_space, '_DIST_WARN_LIMIT', 1)
with pytest.warns(RuntimeWarning, match='Computing distances for 258'):
add_source_space_distances(src_new)
_compare_source_spaces(src, src_new, 'approx')
# Old SciPy can't do patch info only
src_new = _read_small_src()
with monkeypatch.context() as m:
m.setattr(scipy, '__version__', '1.0')
with pytest.raises(RuntimeError, match='required to calculate patch '):
add_source_space_distances(src_new, dist_limit=0)
# New SciPy can
if check_version('scipy', '1.3'):
src_nodist = src.copy()
for s in src_nodist:
for key in ('dist', 'dist_limit'):
s[key] = None
add_source_space_distances(src_new, dist_limit=0)
_compare_source_spaces(src, src_new, 'approx')
@testing.requires_testing_data
def test_add_source_space_distances_limited(tmpdir):
"""Test adding distances to source space with a dist_limit."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 200 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmpdir.join('temp-src.fif')
add_source_space_distances(src_new, dist_limit=0.007)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
for so, sn in zip(src, src_new):
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code
do.data[do.data > 0.007] = 0
do.eliminate_zeros()
# make sure we have some comparable distances
assert np.sum(do.data < 0.007) > 400
# do comparison over the region computed
d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_add_source_space_distances(tmpdir):
"""Test adding distances to source space."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 19 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmpdir.join('temp-src.fif')
n_jobs = 2
assert n_do % n_jobs != 0
with pytest.raises(ValueError, match='non-negative'):
add_source_space_distances(src_new, dist_limit=-1)
add_source_space_distances(src_new, n_jobs=n_jobs)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
# iterate over both hemispheres
for so, sn in zip(src, src_new):
v = so['vertno'][:n_do]
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code (some residual), and Python
ds = list()
for d in [do, dn]:
d.data[d.data > 0.007] = 0
d = d[v][:, v]
d.eliminate_zeros()
ds.append(d)
# make sure we actually calculated some comparable distances
assert np.sum(ds[0].data < 0.007) > 10
# do comparison
d = ds[0] - ds[1]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space(tmpdir):
"""Test setting up (and reading/writing) discrete source spaces."""
src = read_source_spaces(fname)
v = src[0]['vertno']
# let's make a discrete version with the C code, and with ours
temp_name = tmpdir.join('temp-src.fif')
# save
temp_pos = tmpdir.join('temp-pos.txt')
np.savetxt(str(temp_pos), np.c_[src[0]['rr'][v], src[0]['nn'][v]])
# let's try the spherical one (no bem or surf supplied)
run_subprocess(['mne_volume_source_space', '--meters',
'--pos', temp_pos, '--src', temp_name])
src_c = read_source_spaces(temp_name)
pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
src_new = setup_volume_source_space(pos=pos_dict)
assert src_new.kind == 'discrete'
_compare_source_spaces(src_c, src_new, mode='approx')
assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
rtol=1e-3, atol=1e-6)
assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
rtol=1e-3, atol=1e-6)
# now do writing
write_source_spaces(temp_name, src_c, overwrite=True)
src_c2 = read_source_spaces(temp_name)
_compare_source_spaces(src_c, src_c2)
# now do MRI
pytest.raises(ValueError, setup_volume_source_space, 'sample',
pos=pos_dict, mri=fname_mri)
assert repr(src_new).split('~')[0] == repr(src_c).split('~')[0]
assert ' kB' in repr(src_new)
assert src_new.kind == 'discrete'
assert _get_src_type(src_new, None) == 'discrete'
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_source_space(tmpdir):
"""Test setting up volume source spaces."""
src = read_source_spaces(fname_vol)
temp_name = tmpdir.join('temp-src.fif')
surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
surf['rr'] *= 1e3 # convert to mm
bem_sol = read_bem_solution(fname_bem_3_sol)
bem = read_bem_solution(fname_bem_sol)
# The one in the testing dataset (uses bem as bounds)
for this_bem, this_surf in zip(
(bem, fname_bem, fname_bem_3, bem_sol, fname_bem_3_sol, None),
(None, None, None, None, None, surf)):
src_new = setup_volume_source_space(
'sample', pos=7.0, bem=this_bem, surface=this_surf,
subjects_dir=subjects_dir)
write_source_spaces(temp_name, src_new, overwrite=True)
src[0]['subject_his_id'] = 'sample' # XXX: to make comparison pass
_compare_source_spaces(src, src_new, mode='approx')
del src_new
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
with pytest.raises(IOError, match='surface file.*not found'):
setup_volume_source_space(
'sample', surface='foo', mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'][-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
with pytest.raises(ValueError, match='BEM is not in MRI coord.* got head'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'] = bem['surfs'][:-1] # no inner skull surf
with pytest.raises(ValueError, match='Could not get inner skul.*from BEM'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
del bem
assert repr(src) == repr(src_new)
assert ' MB' in repr(src)
assert src.kind == 'volume'
# Spheres
sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
src = setup_volume_source_space(pos=10, sphere=(0., 0., 0., 0.09))
src_new = setup_volume_source_space(pos=10, sphere=sphere)
_compare_source_spaces(src, src_new, mode='exact')
with pytest.raises(ValueError, match='sphere, if str'):
setup_volume_source_space(sphere='foo')
# Need a radius
sphere = make_sphere_model(head_radius=None)
with pytest.raises(ValueError, match='be spherical with multiple layers'):
setup_volume_source_space(sphere=sphere)
@testing.requires_testing_data
@requires_mne
def test_other_volume_source_spaces(tmpdir):
"""Test setting up other volume source spaces."""
# these are split off because they require the MNE tools, and
# Travis doesn't seem to like them
# let's try the spherical one (no bem or surf supplied)
temp_name = tmpdir.join('temp-src.fif')
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name,
'--mri', fname_mri])
src = read_source_spaces(temp_name)
sphere = (0., 0., 0., 0.09)
src_new = setup_volume_source_space(None, pos=7.0, mri=fname_mri,
subjects_dir=subjects_dir,
sphere=sphere)
    # we use a more accurate elimination criterion, so let's fix the MNE-C
    # source space
assert len(src_new[0]['vertno']) == 7497
assert len(src) == 1
assert len(src_new) == 1
good_mask = np.in1d(src[0]['vertno'], src_new[0]['vertno'])
src[0]['inuse'][src[0]['vertno'][~good_mask]] = 0
assert src[0]['inuse'].sum() == 7497
src[0]['vertno'] = src[0]['vertno'][good_mask]
assert len(src[0]['vertno']) == 7497
src[0]['nuse'] = len(src[0]['vertno'])
assert src[0]['nuse'] == 7497
_compare_source_spaces(src_new, src, mode='approx')
assert 'volume, shape' in repr(src)
del src
del src_new
pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=7.0,
sphere=[1., 1.], mri=fname_mri, # bad sphere
subjects_dir=subjects_dir)
# now without MRI argument, it should give an error when we try
# to read it
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name])
pytest.raises(ValueError, read_source_spaces, temp_name)
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_triangle_neighbors():
"""Test efficient vertex neighboring triangles for surfaces."""
this = read_source_spaces(fname)[0]
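    # build a brute-force reference: register every triangle with each of its
    # three vertices, then compare against the vectorized _triangle_neighbors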
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
assert all(np.array_equal(nt1, nt2)
for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri']))
def test_accumulate_normals():
"""Test efficient normal accumulation for surfaces."""
# set up comparison
n_pts = int(1.6e5) # approx number in sample source space
n_tris = int(3.2e5)
# use all positive to make a worst-case for cumulative summation
# (real "nn" vectors will have both positive and negative values)
tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
tris = np.c_[tris, tris + 1, tris + 2]
tri_nn = rng.rand(n_tris, 3)
this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
# cut-and-paste from original code in surface.py:
# Find neighboring triangles and accumulate vertex normals
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
# vertex normals
verts = this['tris'][p]
this['nn'][verts, :] += this['tri_nn'][p, :]
nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
# the moment of truth (or reckoning)
assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_setup_source_space(tmpdir):
"""Test setting up ico, oct, and all source spaces."""
fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
# first lets test some input params
for spacing in ('oct', 'oct6e'):
with pytest.raises(ValueError, match='subdivision must be an integer'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
for spacing in ('oct0', 'oct-4'):
with pytest.raises(ValueError, match='oct subdivision must be >= 1'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='ico subdivision must be >= 0'):
setup_source_space('sample', spacing='ico-4',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='7emm',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='alls',
add_dist=False, subjects_dir=subjects_dir)
# ico 5 (fsaverage) - write to temp file
src = read_source_spaces(fname_ico)
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('fsaverage', spacing='ico5',
subjects_dir=subjects_dir, add_dist=False)
_compare_source_spaces(src, src_new, mode='approx')
assert repr(src).split('~')[0] == repr(src_new).split('~')[0]
assert repr(src).count('surface (') == 2
assert_array_equal(src[0]['vertno'], np.arange(10242))
assert_array_equal(src[1]['vertno'], np.arange(10242))
# oct-6 (sample) - auto filename + IO
src = read_source_spaces(fname)
temp_name = tmpdir.join('temp-src.fif')
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('sample', spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
write_source_spaces(temp_name, src_new, overwrite=True)
assert_equal(src_new[0]['nuse'], 4098)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
# all source points - no file writing
src_new = setup_source_space('sample', spacing='all',
subjects_dir=subjects_dir, add_dist=False)
assert src_new[0]['nuse'] == len(src_new[0]['rr'])
assert src_new[1]['nuse'] == len(src_new[1]['rr'])
# dense source space to hit surf['inuse'] lines of _create_surf_spacing
pytest.raises(RuntimeError, setup_source_space, 'sample',
spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
@testing.requires_testing_data
@requires_mne
@pytest.mark.slowtest
@pytest.mark.timeout(60)
@pytest.mark.parametrize('spacing', [2, 7])
def test_setup_source_space_spacing(tmpdir, spacing):
"""Test setting up surface source spaces using a given spacing."""
copytree(op.join(subjects_dir, 'sample'), str(tmpdir.join('sample')))
args = [] if spacing == 7 else ['--spacing', str(spacing)]
with modified_env(SUBJECTS_DIR=str(tmpdir), SUBJECT='sample'):
run_subprocess(['mne_setup_source_space'] + args)
src = read_source_spaces(tmpdir.join('sample', 'bem',
'sample-%d-src.fif' % spacing))
src_new = setup_source_space('sample', spacing=spacing, add_dist=False,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx', nearest=True)
# Degenerate conditions
with pytest.raises(TypeError, match='spacing must be.*got.*float.*'):
setup_source_space('sample', 7., subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='spacing must be >= 2, got 1'):
setup_source_space('sample', 1, subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_source_spaces():
"""Test reading of source space meshes."""
src = read_source_spaces(fname, patch_stats=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert lh_faces.min() == 0
assert lh_faces.max() == lh_points.shape[0] - 1
assert lh_use_faces.min() >= 0
assert lh_use_faces.max() <= lh_points.shape[0] - 1
assert rh_faces.min() == 0
assert rh_faces.max() == rh_points.shape[0] - 1
assert rh_use_faces.min() >= 0
assert rh_use_faces.max() <= rh_points.shape[0] - 1
@pytest.mark.slowtest
@testing.requires_testing_data
def test_write_source_space(tmpdir):
"""Test reading and writing of source spaces."""
src0 = read_source_spaces(fname, patch_stats=False)
temp_fname = tmpdir.join('tmp-src.fif')
write_source_spaces(temp_fname, src0)
src1 = read_source_spaces(temp_fname, patch_stats=False)
_compare_source_spaces(src0, src1)
# test warnings on bad filenames
src_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-src.fif'):
write_source_spaces(src_badname, src0)
with pytest.warns(RuntimeWarning, match='-src.fif'):
read_source_spaces(src_badname)
@testing.requires_testing_data
def test_vertex_to_mni():
"""Test conversion of vertices to MNI coordinates."""
# obtained using "tksurfer (sample) (l/r)h white"
vertices = [100960, 7620, 150549, 96761]
coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
[-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
hemis = [0, 0, 0, 1]
coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
# less than 1mm error
assert_allclose(coords, coords_2, atol=1.0)
@testing.requires_testing_data
def test_head_to_mni():
"""Test conversion of aseg vertices to MNI coordinates."""
# obtained using freeview
coords = np.array([[22.52, 11.24, 17.72], [22.52, 5.46, 21.58],
[16.10, 5.46, 22.23], [21.24, 8.36, 22.23]]) / 1000.
xfm = read_talxfm('sample', subjects_dir)
coords_MNI = apply_trans(xfm['trans'], coords) * 1000.
mri_head_t, _ = _get_trans(trans_fname, 'mri', 'head', allow_none=False)
# obtained from sample_audvis-meg-oct-6-mixed-fwd.fif
coo_right_amygdala = np.array([[0.01745682, 0.02665809, 0.03281873],
[0.01014125, 0.02496262, 0.04233755],
[0.01713642, 0.02505193, 0.04258181],
[0.01720631, 0.03073877, 0.03850075]])
coords_MNI_2 = head_to_mni(coo_right_amygdala, 'sample', mri_head_t,
subjects_dir)
# less than 1mm error
assert_allclose(coords_MNI, coords_MNI_2, atol=10.0)
@testing.requires_testing_data
def test_vertex_to_mni_fs_nibabel(monkeypatch):
"""Test equivalence of vert_to_mni for nibabel and freesurfer."""
n_check = 1000
subject = 'sample'
vertices = rng.randint(0, 100000, n_check)
hemis = rng.randint(0, 1, n_check)
coords = vertex_to_mni(vertices, hemis, subject, subjects_dir)
monkeypatch.setattr(mne.source_space, 'has_nibabel', lambda: False)
coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir)
# less than 0.1 mm error
assert_allclose(coords, coords_2, atol=0.1)
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('fname', [
None,
op.join(op.dirname(mne.__file__), 'data', 'FreeSurferColorLUT.txt'),
])
def test_read_freesurfer_lut(fname, tmpdir):
"""Test reading volume label names."""
atlas_ids, colors = read_freesurfer_lut(fname)
assert list(atlas_ids).count('Brain-Stem') == 1
assert len(colors) == len(atlas_ids) == 1266
label_names, label_colors = get_volume_labels_from_aseg(
aseg_fname, return_colors=True)
assert isinstance(label_names, list)
assert isinstance(label_colors, list)
assert label_names.count('Brain-Stem') == 1
for c in label_colors:
assert isinstance(c, np.ndarray)
assert c.shape == (4,)
assert len(label_names) == len(label_colors) == 46
with pytest.raises(ValueError, match='must be False'):
get_volume_labels_from_aseg(
aseg_fname, return_colors=True, atlas_ids=atlas_ids)
label_names_2 = get_volume_labels_from_aseg(
aseg_fname, atlas_ids=atlas_ids)
assert label_names == label_names_2
# long name (only test on one run)
if fname is not None:
return
fname = str(tmpdir.join('long.txt'))
names = ['Anterior_Cingulate_and_Medial_Prefrontal_Cortex-' + hemi
for hemi in ('lh', 'rh')]
ids = np.arange(1, len(names) + 1)
colors = [(id_,) * 4 for id_ in ids]
with open(fname, 'w') as fid:
for name, id_, color in zip(names, ids, colors):
out_color = ' '.join('%3d' % x for x in color)
line = '%d %s %s\n' % (id_, name, out_color)
fid.write(line)
lut, got_colors = read_freesurfer_lut(fname)
assert len(lut) == len(got_colors) == len(names) == len(ids)
for name, id_, color in zip(names, ids, colors):
assert name in lut
assert name in got_colors
assert_array_equal(got_colors[name][:3], color[:3])
assert lut[name] == id_
with open(fname, 'w') as fid:
for name, id_, color in zip(names, ids, colors):
out_color = ' '.join('%3d' % x for x in color[:3]) # wrong length!
line = '%d %s %s\n' % (id_, name, out_color)
fid.write(line)
with pytest.raises(RuntimeError, match='formatted'):
read_freesurfer_lut(fname)
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('pass_ids', (True, False))
def test_source_space_from_label(tmpdir, pass_ids):
"""Test generating a source space from volume label."""
aseg_short = 'aseg.mgz'
atlas_ids, _ = read_freesurfer_lut()
volume_label = 'Left-Cerebellum-Cortex'
# Test pos as dict
pos = dict()
with pytest.raises(ValueError, match='mri must be None if pos is a dict'):
setup_volume_source_space(
'sample', pos=pos, volume_label=volume_label, mri=aseg_short,
subjects_dir=subjects_dir)
# Test T1.mgz provided
with pytest.raises(RuntimeError, match=r'Must use a \*aseg.mgz file'):
setup_volume_source_space(
'sample', mri='T1.mgz', volume_label=volume_label,
subjects_dir=subjects_dir)
# Test invalid volume label
mri = aseg_short
with pytest.raises(ValueError, match="'Left-Cerebral' not found.*Did you"):
setup_volume_source_space(
'sample', volume_label='Left-Cerebral', mri=mri,
subjects_dir=subjects_dir)
# These should be equivalent
if pass_ids:
use_volume_label = {volume_label: atlas_ids[volume_label]}
else:
use_volume_label = volume_label
# ensure it works even when not provided (detect that it should be aseg)
src = setup_volume_source_space(
'sample', volume_label=use_volume_label, add_interpolator=False,
subjects_dir=subjects_dir)
assert_equal(volume_label, src[0]['seg_name'])
assert src[0]['nuse'] == 404 # for our given pos and label
# test reading and writing
out_name = tmpdir.join('temp-src.fif')
write_source_spaces(out_name, src)
src_from_file = read_source_spaces(out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
@testing.requires_testing_data
@requires_nibabel()
def test_source_space_exclusive_complete(src_volume_labels):
"""Test that we produce exclusive and complete labels."""
# these two are neighbors and are quite large, so let's use them to
# ensure no overlaps
src, volume_labels, _ = src_volume_labels
ii = volume_labels.index('Left-Cerebral-White-Matter')
jj = volume_labels.index('Left-Cerebral-Cortex')
assert src[ii]['nuse'] == 755 # 2034 with pos=5, was 2832
assert src[jj]['nuse'] == 616 # 1520 with pos=5, was 2623
src_full = read_source_spaces(fname_vol)
# This implicitly checks for overlap because np.sort would preserve
# duplicates, and it checks for completeness because the sets should match
assert_array_equal(src_full[0]['vertno'],
np.sort(np.concatenate([s['vertno'] for s in src])))
for si, s in enumerate(src):
assert_allclose(src_full[0]['rr'], s['rr'], atol=1e-6)
# also check single_volume=True -- should be the same result
src_single = setup_volume_source_space(
src[0]['subject_his_id'], 7., 'aseg.mgz', bem=fname_bem,
volume_label=volume_labels, single_volume=True, add_interpolator=False,
subjects_dir=subjects_dir)
assert len(src_single) == 1
assert 'Unknown+Left-Cerebral-White-Matter+Left-' in repr(src_single)
assert_array_equal(src_full[0]['vertno'], src_single[0]['vertno'])
@pytest.mark.timeout(60) # ~24 sec on Travis
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_read_volume_from_src():
"""Test reading volumes from a mixed source space."""
labels_vol = ['Left-Amygdala',
'Brain-Stem',
'Right-Amygdala']
src = read_source_spaces(fname)
# Setup a volume source space
vol_src = setup_volume_source_space('sample', mri=aseg_fname,
pos=5.0,
bem=fname_bem,
volume_label=labels_vol,
subjects_dir=subjects_dir)
# Generate the mixed source space, testing some list methods
assert src.kind == 'surface'
assert vol_src.kind == 'volume'
src += vol_src
assert src.kind == 'mixed'
assert vol_src.kind == 'volume'
assert src[:2].kind == 'surface'
assert src[2:].kind == 'volume'
assert src[:].kind == 'mixed'
with pytest.raises(RuntimeError, match='Invalid source space'):
src[::2]
volume_src = get_volume_labels_from_src(src, 'sample', subjects_dir)
volume_label = volume_src[0].name
volume_label = 'Left-' + volume_label.replace('-lh', '')
# Test
assert_equal(volume_label, src[2]['seg_name'])
assert_equal(src[2]['type'], 'vol')
@testing.requires_testing_data
@requires_nibabel()
def test_combine_source_spaces(tmpdir):
"""Test combining source spaces."""
import nibabel as nib
rng = np.random.RandomState(2)
volume_labels = ['Brain-Stem', 'Right-Hippocampus'] # two fairly large
# create a sparse surface source space to ensure all get mapped
# when mri_resolution=False
srf = setup_source_space('sample', 'oct3', add_dist=False,
subjects_dir=subjects_dir)
    # setup a volume source space (only the first label is used here)
vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_labels[0],
mri=aseg_fname, add_interpolator=False)
# setup a discrete source space
rr = rng.randint(0, 11, (20, 3)) * 5e-3
nn = np.zeros(rr.shape)
nn[:, -1] = 1
pos = {'rr': rr, 'nn': nn}
disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
pos=pos, verbose='error')
# combine source spaces
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
src = srf + vol + disc
assert src.kind == 'mixed'
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
# test addition of source spaces
assert len(src) == 4
# test reading and writing
src_out_name = tmpdir.join('temp-src.fif')
src.save(src_out_name)
src_from_file = read_source_spaces(src_out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
assert repr(src).split('~')[0] == repr(src_from_file).split('~')[0]
assert_equal(src.kind, 'mixed')
# test that all source spaces are in MRI coordinates
coord_frames = np.array([s['coord_frame'] for s in src])
assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()
# test errors for export_volume
image_fname = tmpdir.join('temp-image.mgz')
# source spaces with no volume
with pytest.raises(ValueError, match='at least one volume'):
srf.export_volume(image_fname, verbose='error')
# unrecognized source type
disc2 = disc.copy()
disc2[0]['type'] = 'kitty'
with pytest.raises(ValueError, match='Invalid value'):
src + disc2
del disc2
# unrecognized file type
bad_image_fname = tmpdir.join('temp-image.png')
# vertices outside vol space warning
pytest.raises(ValueError, src.export_volume, bad_image_fname,
verbose='error')
# mixed coordinate frames
disc3 = disc.copy()
disc3[0]['coord_frame'] = 10
src_mixed_coord = src + disc3
with pytest.raises(ValueError, match='must be in head coordinates'):
src_mixed_coord.export_volume(image_fname, verbose='error')
# now actually write it
fname_img = tmpdir.join('img.nii')
for mri_resolution in (False, 'sparse', True):
for src, up in ((vol, 705),
(srf + vol, 27272),
(disc + vol, 705)):
src.export_volume(
fname_img, use_lut=False,
mri_resolution=mri_resolution, overwrite=True)
img_data = _get_img_fdata(nib.load(str(fname_img)))
n_src = img_data.astype(bool).sum()
n_want = sum(s['nuse'] for s in src)
if mri_resolution is True:
n_want += up
assert n_src == n_want, src
# gh-8004
temp_aseg = tmpdir.join('aseg.mgz')
aseg_img = nib.load(aseg_fname)
aseg_affine = aseg_img.affine
aseg_affine[:3, :3] *= 0.7
new_aseg = nib.MGHImage(aseg_img.dataobj, aseg_affine)
nib.save(new_aseg, str(temp_aseg))
lh_cereb = mne.setup_volume_source_space(
"sample", mri=temp_aseg, volume_label="Left-Cerebellum-Cortex",
add_interpolator=False, subjects_dir=subjects_dir)
src = srf + lh_cereb
with pytest.warns(RuntimeWarning, match='2 surf vertices lay outside'):
src.export_volume(image_fname, mri_resolution="sparse", overwrite=True)
@testing.requires_testing_data
def test_morph_source_spaces():
"""Test morphing of source spaces."""
src = read_source_spaces(fname_fs)
src_morph = read_source_spaces(fname_morph)
src_morph_py = morph_source_spaces(src, 'sample',
subjects_dir=subjects_dir)
_compare_source_spaces(src_morph, src_morph_py, mode='approx')
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morphed_source_space_return():
"""Test returning a morphed source space to the original subject."""
# let's create some random data on fsaverage
data = rng.randn(20484, 1)
tmin, tstep = 0, 1.
src_fs = read_source_spaces(fname_fs)
stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
tmin, tstep, 'fsaverage')
n_verts_fs = sum(len(s['vertno']) for s in src_fs)
# Create our morph source space
src_morph = morph_source_spaces(src_fs, 'sample',
subjects_dir=subjects_dir)
n_verts_sample = sum(len(s['vertno']) for s in src_morph)
assert n_verts_fs == n_verts_sample
# Morph the data over using standard methods
stc_morph = compute_source_morph(
src_fs, 'fsaverage', 'sample',
spacing=[s['vertno'] for s in src_morph], smooth=1,
subjects_dir=subjects_dir, warn=False).apply(stc_fs)
assert stc_morph.data.shape[0] == n_verts_sample
# We can now pretend like this was real data we got e.g. from an inverse.
# To be complete, let's remove some vertices
keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
for v in stc_morph.vertices]
stc_morph = SourceEstimate(
np.concatenate([stc_morph.lh_data[keeps[0]],
stc_morph.rh_data[keeps[1]]]),
[v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
'sample')
# Return it to the original subject
stc_morph_return = stc_morph.to_original_src(
src_fs, subjects_dir=subjects_dir)
# This should fail (has too many verts in SourceMorph)
with pytest.warns(RuntimeWarning, match='vertices not included'):
morph = compute_source_morph(
src_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='vertices do not match'):
morph.apply(stc_morph)
# Compare to the original data
with pytest.warns(RuntimeWarning, match='vertices not included'):
stc_morph_morph = compute_source_morph(
src=stc_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir).apply(stc_morph)
assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Explicitly test having two vertices map to the same target vertex. We
# simulate this by having two vertices be at the same position.
src_fs2 = src_fs.copy()
vert1, vert2 = src_fs2[0]['vertno'][:2]
src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
stc_morph_return = stc_morph.to_original_src(
src_fs2, subjects_dir=subjects_dir)
# test to_original_src method result equality
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Degenerate cases
stc_morph.subject = None # no .subject provided
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
stc_morph.subject = 'sample'
del src_fs[0]['subject_his_id'] # no name in src_fsaverage
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'fsaverage' # name mismatch
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='foo', subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'sample'
src = read_source_spaces(fname) # wrong source space
pytest.raises(RuntimeError, stc_morph.to_original_src,
src, subjects_dir=subjects_dir)
run_tests_if_main()
# The following code was used to generate small-src.fif.gz.
# Unfortunately the C code bombs when trying to add source space distances,
# possibly due to incomplete "faking" of a smaller surface on our part here.
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import mne
data_path = mne.datasets.sample.data_path()
src = mne.setup_source_space('sample', fname=None, spacing='oct5')
hemis = ['lh', 'rh']
fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
vs = list()
for s, fname in zip(src, fnames):
coords = s['rr'][s['vertno']]
vs.append(s['vertno'])
idx = -1 * np.ones(len(s['rr']))
idx[s['vertno']] = np.arange(s['nuse'])
faces = s['use_tris']
faces = idx[faces]
mne.write_surface(fname, coords, faces)
# we need to move sphere surfaces
spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
for s in spheres:
os.rename(s, s + '.bak')
try:
for s, v in zip(spheres, vs):
coords, faces = mne.read_surface(s + '.bak')
coords = coords[v]
mne.write_surface(s, coords, faces)
src = mne.setup_source_space('sample', fname=None, spacing='oct4',
surface='decimated')
finally:
for s in spheres:
os.rename(s + '.bak', s)
fname = 'small-src.fif'
fname_gz = fname + '.gz'
mne.write_source_spaces(fname, src)
mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
'--srcp', fname])
mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
"""
| bsd-3-clause |
abidrahmank/MyRoughWork | roughnote/corner_detectors/opencv_harris_comparison.py | 1 | 1610 | """ Rough implementation of Harris Corner Detector and Shi-Tomasi
    More than 10x faster than Harris.py """
import cv2,time,sys
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('sofsign.jpg',0)
img2 = img.copy() # for opencv
#img = cv2.equalizeHist(img)
rows,cols = img.shape
img = cv2.GaussianBlur(img,(5,5),1)
t = time.time()
# Find Ix, Iy
Ix = cv2.Sobel(img,5,1,0)
Iy = cv2.Sobel(img,5,0,1)
# Find Ix2, Iy2, IxIy
Ix2 = Ix*Ix
Iy2 = Iy*Iy
IxIy = Ix*Iy
# Convolve them with a larger Gaussian Window
a = Ix2 = cv2.GaussianBlur(Ix2,(5,5),1)
d = Iy2 = cv2.GaussianBlur(Iy2,(5,5),1)
b = c = IxIy = cv2.GaussianBlur(IxIy,(5,5),1)
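# a, b, c, d are the per-pixel entries of the 2x2 structure tensor M = [[a, b], [c, d]]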
Trace = a+d
Det = a*d - np.square(b)
#R = Det - 0.04*np.square(Trace)   # standard Harris response: det(M) - k*trace(M)^2
R = Det/(Trace+1)   # harmonic-mean measure, tracks the smaller eigenvalue like Shi-Tomasi
cv2.normalize(R,R,0,1,cv2.NORM_MINMAX)
R = np.where(R>0.7,255,0)
R = np.uint8(R)
t2 = time.time()
print " time taken self:", t2 - t
img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
contours, hierarchy = cv2.findContours(R,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
m = cv2.moments(cnt)
if m['m00'] != 0:
x = int(m['m10']/m['m00'])
y = int(m['m01']/m['m00'])
else:
x,y = cnt.flatten()[:2]
cv2.circle(img,(x,y),2,255,-1)
cv2.imshow("my_implementation",R)
cv2.imshow('img',img)
################### OPENCV PART #################
t = time.time()
dst = cv2.cornerHarris(img2,5,5,0.04)
#dst = cv2.preCornerDetect(img2,5)   # would overwrite the cornerHarris result above
t2 = time.time()
ret,dst = cv2.threshold(dst,0.7*dst.max(),255,0)
#dst = cv2.preCornerDetect(img2,3)
print "opencv time : ",t2 - t
cv2.imshow("opencv",dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| mit |
icdishb/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components via set_params between partial fits
    # raises an error unless the original value is restored.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between fits raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
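if __name__ == '__main__':
    # Illustrative out-of-core sketch (an editor addition, not part of the
    # test suite): stream data into IncrementalPCA in mini-batches with
    # partial_fit, the pattern the tests above exercise.
    stream_rng = np.random.RandomState(0)
    X_stream = stream_rng.randn(500, 10)
    ipca_stream = IncrementalPCA(n_components=3)
    for chunk in np.array_split(X_stream, 10):        # ten chunks of 50 samples
        ipca_stream.partial_fit(chunk)
    print(ipca_stream.transform(X_stream[:5]).shape)  # -> (5, 3)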
| bsd-3-clause |
strongh/GPy | GPy/models/bayesian_gplvm.py | 4 | 10052 | # Copyright (c) 2012 - 2014 the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from ..core.sparse_gp_mpi import SparseGP_MPI
from ..likelihoods import Gaussian
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging
class BayesianGPLVM(SparseGP_MPI):
"""
Bayesian Gaussian Process Latent Variable Model
:param Y: observed data (np.ndarray) or GPy.likelihood
:type Y: np.ndarray| GPy.likelihood instance
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
Z=None, kernel=None, inference_method=None, likelihood=None,
name='bayesian gplvm', mpi_comm=None, normalizer=None,
missing_data=False, stochastic=False, batchsize=1):
self.logger = logging.getLogger(self.__class__.__name__)
if X is None:
from ..util.initialization import initialize_latent
self.logger.info("initializing latent space X with method {}".format(init))
X, fracs = initialize_latent(init, input_dim, Y)
else:
fracs = np.ones(input_dim)
self.init = init
if X_variance is None:
self.logger.info("initializing latent space variance ~ uniform(0,.1)")
X_variance = np.random.uniform(0,.1,X.shape)
if Z is None:
self.logger.info("initializing inducing inputs")
Z = np.random.permutation(X.copy())[:num_inducing]
assert Z.shape[1] == X.shape[1]
if kernel is None:
self.logger.info("initializing kernel RBF")
kernel = kern.RBF(input_dim, lengthscale=1./fracs, ARD=True) #+ kern.Bias(input_dim) + kern.White(input_dim)
if likelihood is None:
likelihood = Gaussian()
self.variational_prior = NormalPrior()
X = NormalPosterior(X, X_variance)
if inference_method is None:
if mpi_comm is not None:
inference_method = VarDTC_minibatch(mpi_comm=mpi_comm)
else:
from ..inference.latent_function_inference.var_dtc import VarDTC
self.logger.debug("creating inference_method var_dtc")
inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
if isinstance(inference_method,VarDTC_minibatch):
inference_method.mpi_comm = mpi_comm
super(BayesianGPLVM,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
name=name, inference_method=inference_method,
normalizer=normalizer, mpi_comm=mpi_comm,
variational_prior=self.variational_prior,
)
self.link_parameter(self.X, index=0)
def set_X_gradients(self, X, X_grad):
"""Set the gradients of the posterior distribution of X in its specific form."""
X.mean.gradient, X.variance.gradient = X_grad
def get_X_gradients(self, X):
"""Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient
def parameters_changed(self):
super(BayesianGPLVM,self).parameters_changed()
if isinstance(self.inference_method, VarDTC_minibatch):
return
kl_fctr = 1.
self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(
variational_posterior=self.X,
Z=self.Z,
dL_dpsi0=self.grad_dict['dL_dpsi0'],
dL_dpsi1=self.grad_dict['dL_dpsi1'],
dL_dpsi2=self.grad_dict['dL_dpsi2'])
self.variational_prior.update_gradients_KL(self.X)
#super(BayesianGPLVM, self).parameters_changed()
#self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
#self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
# This is testing code -------------------------
# i = np.random.randint(self.X.shape[0])
# X_ = self.X.mean
# which = np.sqrt(((X_ - X_[i:i+1])**2).sum(1)).argsort()>(max(0, self.X.shape[0]-51))
# _, _, grad_dict = self.inference_method.inference(self.kern, self.X[which], self.Z, self.likelihood, self.Y[which], self.Y_metadata)
# grad = self.kern.gradients_qX_expectations(variational_posterior=self.X[which], Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
#
# self.X.mean.gradient[:] = 0
# self.X.variance.gradient[:] = 0
# self.X.mean.gradient[which] = grad[0]
# self.X.variance.gradient[which] = grad[1]
# update for the KL divergence
# self.variational_prior.update_gradients_KL(self.X, which)
# -----------------------------------------------
# update for the KL divergence
#self.variational_prior.update_gradients_KL(self.X)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
def do_test_latents(self, Y):
"""
Compute the latent representation for a set of new points Y
Notes:
This will only work with a univariate Gaussian likelihood (for now)
"""
N_test = Y.shape[0]
input_dim = self.Z.shape[1]
means = np.zeros((N_test, input_dim))
covars = np.zeros((N_test, input_dim))
dpsi0 = -0.5 * self.input_dim / self.likelihood.variance
dpsi2 = self.grad_dict['dL_dpsi2'][0][None, :, :] # TODO: this may change if we ignore het. likelihoods
V = Y/self.likelihood.variance
#compute CPsi1V
#if self.Cpsi1V is None:
# psi1V = np.dot(self.psi1.T, self.likelihood.V)
# tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)
# tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1)
# self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1)
dpsi1 = np.dot(self.posterior.woodbury_vector, V.T)
#start = np.zeros(self.input_dim * 2)
from scipy.optimize import minimize
for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]):
args = (input_dim, self.kern.copy(), self.Z, dpsi0, dpsi1_n.T, dpsi2)
res = minimize(latent_cost_and_grad, jac=True, x0=np.hstack((means[n], covars[n])), args=args, method='BFGS')
xopt = res.x
mu, log_S = xopt.reshape(2, 1, -1)
means[n] = mu[0].copy()
covars[n] = np.exp(log_S[0]).copy()
X = NormalPosterior(means, covars)
return X
def dmu_dX(self, Xnew):
"""
Calculate the gradient of the prediction at Xnew w.r.t Xnew.
"""
dmu_dX = np.zeros_like(Xnew)
for i in range(self.Z.shape[0]):
dmu_dX += self.kern.gradients_X(self.grad_dict['dL_dpsi1'][i:i + 1, :], Xnew, self.Z[i:i + 1, :])
return dmu_dX
def dmu_dXnew(self, Xnew):
"""
Individual gradient of prediction at Xnew w.r.t. each sample in Xnew
"""
gradients_X = np.zeros((Xnew.shape[0], self.num_inducing))
ones = np.ones((1, 1))
for i in range(self.Z.shape[0]):
gradients_X[:, i] = self.kern.gradients_X(ones, Xnew, self.Z[i:i + 1, :]).sum(-1)
return np.dot(gradients_X, self.grad_dict['dL_dpsi1'])
def plot_steepest_gradient_map(self, *args, ** kwargs):
"""
See GPy.plotting.matplot_dep.dim_reduction_plots.plot_steepest_gradient_map
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_steepest_gradient_map(self,*args,**kwargs)
def latent_cost_and_grad(mu_S, input_dim, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
"""
objective function for fitting the latent variables for test points
(negative log-likelihood: should be minimised!)
"""
mu = mu_S[:input_dim][None]
log_S = mu_S[input_dim:][None]
S = np.exp(log_S)
X = NormalPosterior(mu, S)
psi0 = kern.psi0(Z, X)
psi1 = kern.psi1(Z, X)
psi2 = kern.psi2(Z, X)
lik = dL_dpsi0 * psi0.sum() + np.einsum('ij,kj->...', dL_dpsi1, psi1) + np.einsum('ijk,lkj->...', dL_dpsi2, psi2) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
dLdmu, dLdS = kern.gradients_qX_expectations(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, X)
dmu = dLdmu - mu
# dS = S0 + S1 + S2 -0.5 + .5/S
dlnS = S * (dLdS - 0.5) + .5
return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))
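if __name__ == '__main__':
    # Illustrative usage sketch (an editor addition, not part of the original
    # GPy module): fit the BayesianGPLVM defined above to random data and
    # inspect the variational posterior over the latent X.  The optimize()
    # call and its keyword arguments come from GPy's model base class and are
    # assumed here rather than shown in this file.
    demo_Y = np.random.randn(60, 8)                    # 60 samples, 8 observed dims
    demo_model = BayesianGPLVM(demo_Y, input_dim=2, num_inducing=15)
    demo_model.optimize(messages=False, max_iters=50)
    print(demo_model.X.mean.shape)                     # mean of the NormalPosterior over X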
| bsd-3-clause |
kevin-intel/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 6 | 5052 | """
============================
Gradient Boosting regression
============================
This example demonstrates Gradient Boosting to produce a predictive
model from an ensemble of weak predictive models. Gradient boosting can be used
for regression and classification problems. Here, we will train a model to
tackle a diabetes regression task. We will obtain the results from
:class:`~sklearn.ensemble.GradientBoostingRegressor` with least squares loss
and 500 regression trees of depth 4.
Note: For larger datasets (n_samples >= 10000), please refer to
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# Maria Telenczuk <https://github.com/maikia>
# Katrina Ni <https://github.com/nilichen>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, ensemble
from sklearn.inspection import permutation_importance
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# %%
# Load the data
# -------------------------------------
#
# First we need to load the data.
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# %%
# Data preprocessing
# -------------------------------------
#
# Next, we will split our dataset to use 90% for training and leave the rest
# for testing. We will also set the regression model parameters. You can play
# with these parameters to see how the results change.
#
# n_estimators : the number of boosting stages that will be performed.
# Later, we will plot deviance against boosting iterations.
#
# max_depth : limits the number of nodes in the tree.
# The best value depends on the interaction of the input variables.
#
# min_samples_split : the minimum number of samples required to split an
# internal node.
#
# learning_rate : how much the contribution of each tree will shrink.
#
# loss : loss function to optimize. The least squares function is used in this
# case; however, there are many other options (see
# :class:`~sklearn.ensemble.GradientBoostingRegressor` ).
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=13)
params = {'n_estimators': 500,
'max_depth': 4,
'min_samples_split': 5,
'learning_rate': 0.01,
'loss': 'squared_error'}
# %%
# Fit regression model
# -------------------------------------
#
# Now we will initialize the gradient boosting regressor and fit it to our
# training data. Let's also look at the mean squared error on the test data.
reg = ensemble.GradientBoostingRegressor(**params)
reg.fit(X_train, y_train)
mse = mean_squared_error(y_test, reg.predict(X_test))
print("The mean squared error (MSE) on test set: {:.4f}".format(mse))
# %%
# Plot training deviance
# -------------------------------------
#
# Finally, we will visualize the results. To do that we will first compute the
# test set deviance and then plot it against boosting iterations.
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(reg.staged_predict(X_test)):
test_score[i] = reg.loss_(y_test, y_pred)
fig = plt.figure(figsize=(6, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, reg.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
fig.tight_layout()
plt.show()
# %%
# Plot feature importance
# -------------------------------------
#
# Careful, impurity-based feature importances can be misleading for
# high cardinality features (many unique values). As an alternative,
# the permutation importances of ``reg`` can be computed on a
# held out test set. See :ref:`permutation_importance` for more details.
#
# For this example, the impurity-based and permutation methods identify the
# same 2 strongly predictive features but not in the same order. The third most
# predictive feature, "bp", is also the same for the 2 methods. The remaining
# features are less predictive and the error bars of the permutation plot
# show that they overlap with 0.
feature_importance = reg.feature_importances_
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig = plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx])
plt.title('Feature Importance (MDI)')
result = permutation_importance(reg, X_test, y_test, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
plt.subplot(1, 2, 2)
plt.boxplot(result.importances[sorted_idx].T,
vert=False, labels=np.array(diabetes.feature_names)[sorted_idx])
plt.title("Permutation Importance (test set)")
fig.tight_layout()
plt.show()
| bsd-3-clause |
CompPhysics/ComputationalPhysics | doc/Projects/2016/Project1/Examples/Project1.py | 8 | 3161 | # This python script reads from the command line the filename (its root) and
# the largest exponent of 10. This defines the number of mesh points.
# This script then calls an executable from a c++ or fortran code that solves
# a set of linear equations with a tridiagonal matrix defining the second derivative.
# It then makes the various plots as pdf files and finally sets up the basis for
# a report and its accompanying latex file.
import sys, os
from matplotlib import pyplot as plt
import numpy as np
# Command line arguments using sys.argv[]
try:
filename = sys.argv[1]
exponent = int(sys.argv[2])
except:
print "Usage of this script", sys.argv[0], "infile", sys.argv[1], "Exponent", sys.argv[2]; sys.exit(1)
# Define command line text string
cmdline = './project1.x '+filename +' ' + str(exponent)
# Now run code, here c++ code which has been compiled and linked
cmd = cmdline
failure = os.system(cmd)
if failure:
print 'running project1 failed'; sys.exit(1)
# Start making figures looping over all exponents
for i in range(1,exponent+1):
# define files to open data and make plots to
fout = filename+str(i)
figfile = fout+".pdf"
data = np.loadtxt(fout)
x = data[:,0]
solution = data[:,1]
exact = data[:,2]
plt.axis([0,1,0, 1.0])
numericalplot = plt.plot(x, solution, 'r:.', linewidth = 2.0, label = 'Numerical')
exactplot = plt.plot(x, exact, 'm:v', linewidth = 2.0, label = 'Exact')
plt.xlabel(r'$x$')
plt.ylabel(r'Solutions')
plt.savefig(figfile)
# Then clean up
plt.clf()
# Now prepare latex file, r in front avoids backslashes being treated
# as control chars in strings. What follows are plain latex commands
preamb = r"""\documentclass[10pt,showpacs,preprintnumbers,footinbib,amsmath,amssymb,aps,prl,twocolumn,groupedaddress,superscriptaddress,showkeys]{revtex4-1}
\usepackage{graphicx}
\usepackage{dcolumn}
\usepackage{bm}
\usepackage[colorlinks=true,urlcolor=blue,citecolor=blue]{hyperref}
\usepackage{color}
\begin{document}
\title{Project 1}
\author{A.~N.~Author}
\affiliation{Department of Something, University of Somewhere, Outer Space}
\begin{abstract}
We present our Ferrari algorithm for solving linear equations. Our best algorithm runs as $4n$ FLOPS with $n$ the dimensionality of the matrix.
\end{abstract}
\maketitle
"""
figure = r"""\begin{figure}[hbtp]
\includegraphics[scale=0.4]{test1.pdf}
\caption{Exact and numerical solutions for $n=10$ mesh points.}
\label{fig:n10points}
\end{figure}
"""
introduction = r"""\section{Introduction}
"""
theory = r"""\section{Theory, algorithms and methods}
"""
results = r"""\section{Results and discussions}
"""
conclusions = r"""\section{Conclusions}
"""
references = r"""\begin{thebibliography}{99}
\bibitem{miller2006} G.~A.~Miller, A.~K.~Opper, and E.~J.~Stephenson, Annu.~Rev.~Nucl.~Sci.~{\bf 56}, 253 (2006).
\end{thebibliography}
"""
# Dump to file:
filename = 'ReportProject1'
f = open(filename + '.tex', "w")
f.write(preamb)
f.write(introduction)
f.write(theory)
f.write(results)
f.write(figure)
f.write(conclusions)
f.write(references)
f.write("""\end{document}""")
f.close()
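# Illustrative follow-up (not part of the original script): the generated report
# could be compiled with the same os.system pattern used for the solver above;
# pdflatex is assumed to be available on the PATH.
def compile_report(texroot='ReportProject1'):
    return os.system('pdflatex ' + texroot + '.tex') == 0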
| cc0-1.0 |
Tomasuh/Tomasuh.github.io | files/cyclic/analyse.py | 1 | 1922 | import pandas
import sqlite3
import dbcommands
import numpy as np
import time
import matplotlib.pyplot as plt
db_obj = dbcommands.the_db()
posts = db_obj.fetch_posts()
df = pandas.DataFrame(data=posts, columns = ["key",\
"title",\
"user",\
"date",\
"size",\
"syntax",\
"expire",\
"scrape_url",\
"full_url"])
df["size"] = df["size"].astype(int)
df = df.sort_values(by='date',ascending=True)
threshold = 10 - 1
max_diff_sec = 60
complete_ranges = []
for _, user_df in df.groupby("user"):
user = user_df["user"].iloc[0]
diff_middle = None
timestamps = []
repeated_intervals = 0
the_range = []
# Loop over grouped users
for __, row in user_df.iterrows():
current_ts = row["date"]
success = True
nr = 0
while nr < len(timestamps):
diff_to = (current_ts - timestamps[nr]).total_seconds()
avg_diff = diff_to/(len(timestamps) - nr)
if len(timestamps) == 1:
diff_middle = avg_diff
elif avg_diff > diff_middle + max_diff_sec or avg_diff < diff_middle - max_diff_sec:
success = False
break
nr += 1
if success:
timestamps.append(current_ts)
the_range.append((row["title"], current_ts, row["full_url"]))
repeated_intervals += 1
else:
if repeated_intervals >= threshold:
complete_ranges.append((row["user"], diff_middle, the_range))
the_range = the_range[:-1]
repeated_intervals = 0
timestamps = timestamps[:-1]
for user, diff_avg, cyclic_range in complete_ranges:
df_c = pandas.DataFrame(data=cyclic_range, columns = ["title",\
"time",\
"url"])
for title, time, url in cyclic_range:
print "%s %s %s %s" % (user, time, url, title)
plt.plot(df_c['time'], [diff_avg/60] * len(df_c['time']), 'ro')
plt.ylabel("Avg interval minutes")
plt.text(0.6, 0.8, user, fontsize=14, transform=plt.gcf().transFigure)
plt.show()
| mit |
hajicj/MUSCIMarker | MUSCIMarker/analyze_tracking_log.py | 1 | 17358 | #!/usr/bin/env python
"""This is a script that performs a quick and dirty analysis
of a MUSCIMarker event log.
What we want to know:
* Number of hours worked
* Speed: how much was done in total?
* Densities: frequency of events (calls) per minute/hour
* Clearly distinguish between user actions and internal tracked actions.
Visualizations:
* Timing visualization
Also, convert to CSV, to make it grep-able? First: fixed-name cols,
then: args dict, formatted as key=value,key=value
"""
from __future__ import print_function, unicode_literals
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import argparse
import codecs
import collections
import io
import itertools
import json
import logging
import numpy
import os
import pprint
import time
import matplotlib.pyplot as plt
import operator
from muscima.io import parse_cropobject_list
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
if __name__ != '__main__':
logger = logging.getLogger(__name__)
else:
# Defer after basicConfig call, before main() call
logger = None
def freqdict(l, sort=True):
out = collections.defaultdict(int)
for item in l:
out[item] += 1
if sort:
s_out = collections.OrderedDict()
for k, v in sorted(list(out.items()), key=operator.itemgetter(1), reverse=True):
s_out[k] = v
out = s_out
return out
##############################################################################
def is_annotation_package(path):
"""Checks that the given path is an annotation package."""
if not os.path.isdir(path):
return False
subdirs = os.listdir(path)
if 'source_images' not in subdirs:
return False
if 'annotations' not in subdirs:
return False
if 'annotation_logs' not in subdirs:
return False
return True
def logs_from_package(package):
"""Collects all log file names (with complete paths) from the given package.
:param package: Path to the annotations package.
:return: List of filenames (full paths).
"""
logger.info('Collecting log files from package {0}'.format(package))
if not os.path.isdir(package):
raise OSError('Package {0} not found!'.format(package))
log_path = os.path.join(package, 'annotation_logs')
if not os.path.isdir(log_path):
raise ValueError('Package {0}: annotation_logs not found, probably not a package.'
''.format(package))
# Collect all log days
log_days = os.listdir(log_path)
# Dealing with people who copied the entire .muscimarker-tracking directory
# (potentially without the dot, as just "muscimarker-tracking")
if len(log_days) == 0:
logger.info('No logs in package {0}!'.format(package))
return []
if log_days[-1].endswith('muscimarker-tracking'):
log_path = os.path.join(log_path, log_days[-1])
log_days = os.listdir(log_path)
log_files = []
for day in log_days:
# .DS_store and other hidden files
if day.startswith('.'):
continue
# Dealing with people who copied only the JSON files
if day.endswith('json'):
logger.info('Found log file that is not inside a day dir: {0}'
''.format(day))
log_files.append(os.path.join(log_path, day))
continue
if day.endswith('xml'):
logger.info('Log file is for some reason XML instead of JSON; copied wrong files???')
continue
day_log_path = os.path.join(log_path, day)
day_log_files = [os.path.join(day_log_path, l)
for l in os.listdir(day_log_path)]
log_files += day_log_files
logger.info('In package {0}: found {1} log files.'
''.format(package, len(log_files)))
logger.debug('In package {0}: log files:\n{1}'
''.format(package, pprint.pformat(log_files)))
return log_files
def try_correct_crashed_json(fname):
"""Attempts to correct an incomplete JSON list file: if MUSCIMarker
crashed, the items list would not get correctly closed. We attempt
to remove the last comma and add a closing bracket (`]`) on a new
line instead, and return the object as a (unicode) string.
>>> json = '''
... [
... {'something': 'this', 'something': 'that'},'''
"""
with open(fname, 'r') as hdl:
lines = [l.rstrip() for l in hdl]
if lines[-1][-1] == ',':
logger.info('Correcting JSON: found hanging comma!')
lines[-1] = lines[-1][:-1]
lines.append(']')
return '\n'.join(lines)
else:
logger.info('No hanging comma, cannot deal with this situation.')
return None
def unique_logs(event_logs):
"""Checks that the event logs are unique using the start event
timestamp. Returns a list of unique event logs. If two have the same
timestamp, the first one is used.
For logging purposes, expects a dict of event logs. Keys are log file names,
values are the event lists.
"""
unique = collections.OrderedDict()
for log_file, l in event_logs.items():
if len(l) < 1:
logger.info('Got an empty log from file {0}'.format(log_file))
continue
init_event = l[0]
if '-time-' not in init_event:
raise ValueError('Got a non-event log JSON list, file {0}! Supposed init event: {1}'
''.format(log_file, init_event))
init_time = init_event['-time-']
if init_time in unique:
logger.info('Found non-unique event log {0} with timestamp {1} ({2} events)!'
' Using first ({3} events).'
''.format(log_file, init_time, len(l), len(unique[init_time])))
else:
unique[init_time] = l
return list(unique.values())
##############################################################################
# Counting results
def annotations_from_package(package):
"""Collect all annotation XML files (with complete paths)
from the given package."""
logger.info('Collecting annotation files from package {0}'.format(package))
if not os.path.isdir(package):
raise OSError('Package {0} not found!'.format(package))
annot_path = os.path.join(package, 'annotations')
if not os.path.isdir(annot_path):
raise ValueError('Package {0}: annotations not found, probably not a package.'
''.format(package))
# Collect all annotations
annotation_files = [os.path.join(annot_path, f)
for f in os.listdir(annot_path) if f.endswith('.xml')]
return annotation_files
def count_cropobjects(annot_file):
return len(parse_cropobject_list(annot_file))
def count_cropobjects_and_relationships(annot_file):
cropobjects = parse_cropobject_list(annot_file)
n_inlinks = 0
for c in cropobjects:
if c.inlinks is not None:
n_inlinks += len(c.inlinks)
return len(cropobjects), n_inlinks
##############################################################################
# Visualization
def events_by_time_units(events, seconds_per_unit=60):
"""Puts the events into bins that correspond to equally spaced
intervals of time. The length of time covered by one bin is
given by seconds_per_unit."""
# Get first event time
start_time = min([float(e['-time-']) for e in events])
# The events do not have to come in-order
bins = collections.defaultdict(list)
for e in events:
t = float(e['-time-'])
n_bin = old_div(int(t - start_time), int(seconds_per_unit))
bins[n_bin].append(e)
return bins
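def events_per_unit_counts(events, seconds_per_unit=60):
    """Illustrative helper (not part of the original script): map each time bin
    produced by events_by_time_units to the number of events it contains."""
    bins = events_by_time_units(events, seconds_per_unit=seconds_per_unit)
    return {n_bin: len(evts) for n_bin, evts in bins.items()}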
def plot_events_by_time(events, type_key='-fn-'):
"""Simple scatterplot visualization.
All events are expected to have a -fn- component."""
fns = [e['-fn-'] for e in events]
# Assign numbers to tracked fns
fns_by_freq = {f: len([e for e in fns if e == f]) for f in set(fns)}
fn_dict = {f: i for i, f in enumerate(sorted(list(fns_by_freq.keys()),
reverse=True,
key=lambda k: fns_by_freq[k]))}
min_time = float(events[0]['-time-'])
dataset = numpy.zeros((len(events), 2))
for i, e in enumerate(events):
dataset[i][0] = float(e['-time-']) - min_time
dataset[i][1] = fn_dict[e[type_key]]
# Now visualize
plt.scatter(dataset[:,0], dataset[:,1])
def format_as_timeflow_csv(events, delimiter='\t'):
"""There is a cool offline visualization tool caled TimeFlow,
which has a timeline app. It needs a pretty specific CSV format
to work, though."""
# What we need:
# - ID
# - Date (human?)
# - The common fields:
min_second = int(min([float(e['-time-']) for e in events]))
def format_date(e):
# return '-'.join(reversed(time_human.replace(':', '-').split('__')))
# time_human = e['-time-human-']
time = float(e['-time-'])
return str(int(time) - min_second)
# Collect all events that are in the data.
event_fields = freqdict(list(itertools.chain(*[list(e.keys()) for e in events])))
output_fields = ['ID', 'Date'] + list(event_fields.keys())
n_fields = len(output_fields)
field2idx = {f: i+2 for i, f in enumerate(event_fields.keys())}
event_table = [['' for _ in range(n_fields)] for _ in events]
for i, e in enumerate(events):
event_table[i][0] = str(i)
event_table[i][1] = format_date(e)#format_date(e['-time-human-'])
for k, v in e.items():
event_table[i][field2idx[k]] = v
# Add labels to event table to get the complete data
# that should be formatted as TSV
output_data = [output_fields] + event_table
output_lines = ['\t'.join(row) for row in output_data]
output_string = '\n'.join(output_lines)
return output_string
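def write_timeflow_tsv(events, out_fname):
    """Illustrative helper (not part of the original script): dump the
    TimeFlow-style table produced above to a UTF-8 encoded file."""
    with codecs.open(out_fname, 'w', 'utf-8') as hdl:
        hdl.write(format_as_timeflow_csv(events))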
##############################################################################
def build_argument_parser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--inputs', nargs='+', action='store',
help='Log files to be analyzed.')
parser.add_argument('-p', '--packages', nargs='+', action='store',
help='Annotation package. If set, will pull'
' all log files in the package.')
parser.add_argument('-a', '--annotator', action='store',
help='Annotator. If set, will pull all log files'
' from all packages in the given person\'s'
' annotation directory')
parser.add_argument('--exclude_packages', nargs='+', action='store',
help='Do not count given package names.')
parser.add_argument('-c', '--count_annotations', action='store_true',
help='If given, will collect annotation files from the'
' supplied packages (or per-annotator packages)'
' and compute object/rel counts and efficiency statistics.')
parser.add_argument('--no_training', action='store_true',
help='If given, will ignore packages with "training" in their name.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Turn on INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Turn on DEBUG messages.')
return parser
def main(args):
logger.info('Starting main...')
_start_time = time.clock()
if args.annotator is not None:
logger.info('Collecting annotation packages for annotator {0}'
''.format(args.annotator))
# Collect all packages, incl. training
packages = []
for d in os.listdir(args.annotator):
package_candidate = os.path.join(args.annotator, d)
if not is_annotation_package(package_candidate):
continue
packages.append(package_candidate)
logger.info('Found: {0} packages'.format(len(packages)))
args.packages = packages
if args.packages is not None:
if args.exclude_packages is not None:
args.packages = [p for p in args.packages
if len([e for e in args.exclude_packages
if p.endswith(e)]) == 0
]
logger.info('Collecting log files for {0} packages.'.format(len(args.packages)))
logger.warning('Found packages:\n{0}'.format('\n'.join(args.packages)))
log_files = []
for package in args.packages:
current_log_files = logs_from_package(package)
log_files += current_log_files
logger.info('Found: {0} log files'.format(len(log_files)))
args.input = log_files
log_data_per_file = {}
for input_file in args.input:
if not os.path.isfile(input_file):
raise ValueError('Log file {0} not found!'.format(input_file))
current_log_data = []
with codecs.open(input_file, 'r', 'utf-8') as hdl:
try:
current_log_data = json.load(hdl)
except ValueError:
logger.info('Could not parse JSON file {0}'.format(input_file))
logger.info('Attempting to correct file.')
corrected = try_correct_crashed_json(input_file)
if corrected is not None:
logger.info('Attempting to parse corrected JSON.')
try:
current_log_data = json.loads(corrected)
except ValueError:
logger.warning('Could not even parse corrected JSON, skipping file {0}.'.format(input_file))
#raise
logger.info('Success!')
else:
logger.info('Unable to correct JSON, skipping file.')
log_data_per_file[input_file] = current_log_data
logger.info('Checking logs for uniqueness. Started with {0} log files.'
''.format(len(log_data_per_file)))
log_data_per_file = unique_logs(log_data_per_file)
logger.info('After uniqueness check: {0} logs left.'.format(len(log_data_per_file)))
log_data = [e for e in itertools.chain(*log_data_per_file)]
if len(log_data) == 0:
print('Received no log data! Skipping ahead to count annotations.')
n_minutes = None
n_hours = None
else:
logger.info('Parsed {0} data items.'.format(len(log_data)))
# Your code goes here
# raise NotImplementedError()
# Frequency by -fn-:
freq_by_fn = freqdict([l.get('-fn-', None) for l in log_data])
by_minute = events_by_time_units(log_data)
by_minute_freq = {k: len(v) for k, v in list(by_minute.items())}
n_minutes = len(by_minute)
print('# minutes worked: {0}'.format(n_minutes))
n_hours = old_div(n_minutes, 60.0)
print('# hours worked: {0:.2f}'.format(n_hours))
print('CZK@120: {0:.3f}'.format(n_hours * 120))
print('CZK@150: {0:.3f}'.format(n_hours * 150))
print('CZK@180: {0:.3f}'.format(n_hours * 180))
print('Avg. events per minute: {0}'.format(old_div(float(len(log_data)), n_minutes)))
if args.count_annotations:
if args.packages is None:
raise ValueError('Cannot count annotations if no packages are given!')
n_cropobjects = 0
n_relationships = 0
for package in args.packages:
annot_files = annotations_from_package(package)
n_c_package = 0
n_r_package = 0
for f in annot_files:
n_c, n_r = count_cropobjects_and_relationships(f)
n_cropobjects += n_c
n_relationships += n_r
n_c_package += n_c
n_r_package += n_r
logger.warn('Pkg. {0}: {1} objs., {2} rels. ({3} files)'
''.format(package, n_c_package, n_r_package, len(annot_files)))
print('Total CropObjects: {0}'.format(n_cropobjects))
print('Total Relationships: {0}'.format(n_relationships))
if n_minutes is not None:
print('Cropobjects per minute: {0:.2f}'.format(old_div(n_cropobjects, float(n_minutes))))
_end_time = time.clock()
logger.info('analyze_tracking_log.py done in {0:.3f} s'.format(_end_time - _start_time))
##############################################################################
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
if args.verbose:
logger.setLevel(logging.INFO)
#logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
#logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
main(args)
| apache-2.0 |
davidpng/FCS_Database | FlowAnal/Feature_Analysis.py | 1 | 1914 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue 06 Jan 2015 03:43:24 PM PST
This file provides machine learning and feature analysis tools
for analyzing annotated case/feature dataframes
@author: David Ng, MD
"""
import numpy as np
import scipy as sp
import pandas as pd
import os
import logging
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
log = logging.getLogger(__name__)
class Feature_Analysis(object):
def __init__(self,features_DF,annotation_DF,threads=4,**kwargs):
self.features=features_DF
self.annotations=annotation_DF
#take or make a report directory
if "report_dir" in kwargs:
self.report_dir = kwargs["report_dir"]
else:
self.report_dir = "data_report"
if not os.path.exists(self.report_dir):
os.makedirs(self.report_dir)
def classifer_setup(self,Type='SVM',**kwargs):
if Type.lower() == 'svm':
            self.classifer = SVC(kernel='rbf')
        elif Type.lower() == 'rfc':
            self.classifer = RandomForestClassifier(**kwargs)
else:
raise TypeError('Classifier Type undefined')
def prototype_analysis(self,**kwargs):
clf = RandomForestClassifier(n_estimators=200, n_jobs=4, **kwargs)
scores = cross_val_score(estimator = clf,
                                 X=self.features.values,
                                 y=self.annotations.values,
cv=8, n_jobs=6, pre_dispatch=20)
filename = "Prototype_Analysis_Report.txt"
out_string = "mean score is: {:.03f} +/- {:.03f}".format(scores.mean(),scores.std())
fh = open(os.path.join(self.report_dir,filename),'w')
fh.write(out_string)
fh.close()
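# Illustrative usage sketch (not part of the original module): the dataframes
# are assumed to hold one row per case, with feature columns and a single
# annotation column.
def _example_run(features_DF, annotation_DF):
    fa = Feature_Analysis(features_DF, annotation_DF, report_dir="data_report")
    fa.prototype_analysis()
    return fa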
| gpl-3.0 |
PalouseRobosub/robosub | src/localization/particle_cloud_histogram.py | 1 | 1026 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import argparse
import rospy
import rostopic
import std_msgs.msg
from geometry_msgs.msg import Point
from sensor_msgs.msg import PointCloud
from sensor_msgs.msg import ChannelFloat32
class Histogram():
def callback(self, msg):
rospy.logdebug("Plotting Update")
plt.ylabel('Weight')
plt.xlabel('Value')
plt.title('Histogram')
n, bins, patches = plt.hist(msg.channels[0].values, int(200),
histtype='bar')
plt.pause(0.05)
plt.gca().clear()
def __init__(self):
# Subscribe to the topic
self.sub = rospy.Subscriber("localization/particles", PointCloud,
self.callback)
rospy.loginfo("Subscribed to localization/particles")
plt.ion()
plt.show()
if __name__ == "__main__":
rospy.init_node("Histogram", anonymous=True)
h = Histogram()
rospy.spin()
| gpl-3.0 |
henrykironde/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/core/magics/pylab.py | 8 | 5620 | """Implementation of magic functions for matplotlib/pylab support.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.warn import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
This function lets you activate matplotlib interactive support
at any point during an IPython session. It does not import anything
into the interactive namespace.
If you are using the inline matplotlib backend in the IPython Notebook
you can set which figure formats are enabled using the following::
In [1]: from IPython.display import set_matplotlib_formats
In [2]: set_matplotlib_formats('pdf', 'svg')
See the docstring of `IPython.display.set_matplotlib_formats` and
`IPython.display.set_matplotlib_close` for more information on
changing the behavior of the inline backend.
Examples
--------
To enable the inline backend for usage with the IPython Notebook::
In [1]: %matplotlib inline
In this case, where the matplotlib default is TkAgg::
In [2]: %matplotlib
Using matplotlib backend: TkAgg
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
gui, backend = self.shell.enable_matplotlib(args.gui)
self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--no-import-all', action='store_true', default=None,
help="""Prevent IPython from performing ``import *`` into the interactive namespace.
You can govern the default behavior of this flag with the
InteractiveShellApp.pylab_import_all configurable.
"""
)
@magic_gui_arg
def pylab(self, line=''):
"""Load numpy and matplotlib to work interactively.
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
%pylab makes the following imports::
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
If you pass `--no-import-all`, the last two `*` imports will be excluded.
See the %matplotlib magic for more details about activating matplotlib
without affecting the interactive namespace.
"""
args = magic_arguments.parse_argstring(self.pylab, line)
if args.no_import_all is None:
# get default from Application
if Application.initialized():
app = Application.instance()
try:
import_all = app.pylab_import_all
except AttributeError:
import_all = True
else:
# nothing specified, no app - default True
import_all = True
else:
# invert no-import flag
import_all = not args.no_import_all
gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
self._show_matplotlib_backend(args.gui, backend)
print ("Populating the interactive namespace from numpy and matplotlib")
if clobbered:
warn("pylab import has clobbered these variables: %s" % clobbered +
"\n`%matplotlib` prevents importing * from pylab and numpy"
)
def _show_matplotlib_backend(self, gui, backend):
"""show matplotlib message backend message"""
if not gui or gui == 'auto':
print("Using matplotlib backend: %s" % backend)
| mit |
elkingtonmcb/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/image.py | 69 | 28764 | """
The image module supports basic image loading, rescaling and display
operations.
"""
from __future__ import division
import os, warnings
import numpy as np
from numpy import ma
from matplotlib import rcParams
from matplotlib import artist as martist
from matplotlib import colors as mcolors
from matplotlib import cm
# For clarity, names from _image are given explicitly in this module:
from matplotlib import _image
from matplotlib import _png
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
class AxesImage(martist.Artist, cm.ScalarMappable):
zorder = 1
# map interpolation strings to module constants
_interpd = {
'nearest' : _image.NEAREST,
'bilinear' : _image.BILINEAR,
'bicubic' : _image.BICUBIC,
'spline16' : _image.SPLINE16,
'spline36' : _image.SPLINE36,
'hanning' : _image.HANNING,
'hamming' : _image.HAMMING,
'hermite' : _image.HERMITE,
'kaiser' : _image.KAISER,
'quadric' : _image.QUADRIC,
'catrom' : _image.CATROM,
'gaussian' : _image.GAUSSIAN,
'bessel' : _image.BESSEL,
'mitchell' : _image.MITCHELL,
'sinc' : _image.SINC,
'lanczos' : _image.LANCZOS,
'blackman' : _image.BLACKMAN,
}
# reverse interp dict
_interpdr = dict([ (v,k) for k,v in _interpd.items()])
interpnames = _interpd.keys()
def __str__(self):
return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
def __init__(self, ax,
cmap = None,
norm = None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample = False,
**kwargs
):
"""
interpolation and cmap default to their rc settings
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
extent is data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self._extent = extent
self.set_filternorm(filternorm)
self.set_filterrad(filterrad)
self._filterrad = filterrad
self.set_interpolation(interpolation)
self.set_resample(resample)
self.axes = ax
self._imcache = None
self.update(kwargs)
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self._imcache = None
def changed(self):
"""
Call this whenever the mappable is changed so observers can
update state
"""
self._imcache = None
self._rgbacache = None
cm.ScalarMappable.changed(self)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array or the image attribute')
xmin, xmax, ymin, ymax = self.get_extent()
dxintv = xmax-xmin
dyintv = ymax-ymin
# the viewport scale factor
sx = dxintv/self.axes.viewLim.width
sy = dyintv/self.axes.viewLim.height
numrows, numcols = self._A.shape[:2]
if sx > 2:
x0 = (self.axes.viewLim.x0-xmin)/dxintv * numcols
ix0 = max(0, int(x0 - self._filterrad))
x1 = (self.axes.viewLim.x1-xmin)/dxintv * numcols
ix1 = min(numcols, int(x1 + self._filterrad))
xslice = slice(ix0, ix1)
xmin_old = xmin
xmin = xmin_old + ix0*dxintv/numcols
xmax = xmin_old + ix1*dxintv/numcols
dxintv = xmax - xmin
sx = dxintv/self.axes.viewLim.width
else:
xslice = slice(0, numcols)
if sy > 2:
y0 = (self.axes.viewLim.y0-ymin)/dyintv * numrows
iy0 = max(0, int(y0 - self._filterrad))
y1 = (self.axes.viewLim.y1-ymin)/dyintv * numrows
iy1 = min(numrows, int(y1 + self._filterrad))
if self.origin == 'upper':
yslice = slice(numrows-iy1, numrows-iy0)
else:
yslice = slice(iy0, iy1)
ymin_old = ymin
ymin = ymin_old + iy0*dyintv/numrows
ymax = ymin_old + iy1*dyintv/numrows
dyintv = ymax - ymin
sy = dyintv/self.axes.viewLim.height
else:
yslice = slice(0, numrows)
if xslice != self._oldxslice or yslice != self._oldyslice:
self._imcache = None
self._oldxslice = xslice
self._oldyslice = yslice
if self._imcache is None:
if self._A.dtype == np.uint8 and len(self._A.shape) == 3:
im = _image.frombyte(self._A[yslice,xslice,:], 0)
im.is_grayscale = False
else:
if self._rgbacache is None:
x = self.to_rgba(self._A, self._alpha)
self._rgbacache = x
else:
x = self._rgbacache
im = _image.fromarray(x[yslice,xslice], 0)
if len(self._A.shape) == 2:
im.is_grayscale = self.cmap.is_gray()
else:
im.is_grayscale = False
self._imcache = im
if self.origin=='upper':
im.flipud_in()
else:
im = self._imcache
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg( *bg)
# image input dimensions
im.reset_matrix()
numrows, numcols = im.get_size()
im.set_interpolation(self._interpd[self._interpolation])
im.set_resample(self._resample)
# the viewport translation
tx = (xmin-self.axes.viewLim.x0)/dxintv * numcols
ty = (ymin-self.axes.viewLim.y0)/dyintv * numrows
l, b, r, t = self.axes.bbox.extents
widthDisplay = (round(r) + 0.5) - (round(l) - 0.5)
heightDisplay = (round(t) + 0.5) - (round(b) - 0.5)
widthDisplay *= magnification
heightDisplay *= magnification
im.apply_translation(tx, ty)
# resize viewport to display
rx = widthDisplay / numcols
ry = heightDisplay / numrows
im.apply_scaling(rx*sx, ry*sy)
im.resize(int(widthDisplay+0.5), int(heightDisplay+0.5),
norm=self._filternorm, radius=self._filterrad)
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
if (self.axes.get_xscale() != 'linear' or
self.axes.get_yscale() != 'linear'):
warnings.warn("Images are not supported on non-linear axes.")
im = self.make_image(renderer.get_image_magnification())
im._url = self.get_url()
l, b, widthDisplay, heightDisplay = self.axes.bbox.bounds
clippath, affine = self.get_transformed_clip_path_and_affine()
renderer.draw_image(round(l), round(b), im, self.axes.bbox.frozen(),
clippath, affine)
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
# be too difficult given that the image is rectilinear
x, y = mouseevent.xdata, mouseevent.ydata
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin,xmax = xmax,xmin
if ymin > ymax:
ymin,ymax = ymax,ymin
#print x, y, xmin, xmax, ymin, ymax
if x is not None and y is not None:
inside = x>=xmin and x<=xmax and y>=ymin and y<=ymax
else:
inside = False
return inside,{}
def write_png(self, fname, noscale=False):
"""Write the image to png file with fname"""
im = self.make_image()
if noscale:
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def set_data(self, A, shape=None):
"""
Set the image array
ACCEPTS: numpy/PIL Image A"""
# check if data is PIL Image without importing Image
if hasattr(A,'getpixel'):
self._A = pil_to_array(A)
elif ma.isMA(A):
self._A = A
else:
self._A = np.asarray(A) # assume array
if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
self._imcache =None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
def set_array(self, A):
"""
retained for backwards compatibility - use set_data instead
ACCEPTS: numpy array A or PIL Image"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked
# by mistake.
self.set_data(A)
def set_extent(self, extent):
"""extent is data axes (left, right, bottom, top) for making image plots
"""
self._extent = extent
xmin, xmax, ymin, ymax = extent
corners = (xmin, ymin), (xmax, ymax)
self.axes.update_datalim(corners)
if self.axes._autoscaleon:
self.axes.set_xlim((xmin, xmax))
self.axes.set_ylim((ymin, ymax))
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning',
'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
'sinc' | 'lanczos' | ]
"""
if s is None: s = rcParams['image.interpolation']
s = s.lower()
if s not in self._interpd:
raise ValueError('Illegal interpolation string')
self._interpolation = s
def set_resample(self, v):
if v is None: v = rcParams['image.resample']
self._resample = v
    def get_resample(self):
        return self._resample
def get_extent(self):
'get the image extent: left, right, bottom, top'
if self._extent is not None:
return self._extent
else:
sz = self.get_size()
#print 'sz', sz
numrows, numcols = sz
if self.origin == 'upper':
return (-0.5, numcols-0.5, numrows-0.5, -0.5)
else:
return (-0.5, numcols-0.5, -0.5, numrows-0.5)
def set_filternorm(self, filternorm):
"""Set whether the resize filter norms the weights -- see
help for imshow
ACCEPTS: 0 or 1
"""
if filternorm:
self._filternorm = 1
else:
self._filternorm = 0
def get_filternorm(self):
'return the filternorm setting'
return self._filternorm
def set_filterrad(self, filterrad):
"""Set the resize filter radius only applicable to some
interpolation schemes -- see help for imshow
ACCEPTS: positive float
"""
r = float(filterrad)
assert(r>0)
self._filterrad = r
def get_filterrad(self):
'return the filterrad setting'
return self._filterrad
class NonUniformImage(AxesImage):
def __init__(self, ax,
**kwargs
):
interp = kwargs.pop('interpolation', 'nearest')
AxesImage.__init__(self, ax,
**kwargs)
AxesImage.set_interpolation(self, interp)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x0, y0, v_width, v_height = self.axes.viewLim.bounds
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width *= magnification
height *= magnification
im = _image.pcolor(self._Ax, self._Ay, self._A,
height, width,
(x0, x0+v_width, y0, y0+v_height),
self._interpd[self._interpolation])
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg(*bg)
im.is_grayscale = self.is_grayscale
return im
def set_data(self, x, y, A):
x = np.asarray(x,np.float32)
y = np.asarray(y,np.float32)
if not ma.isMA(A):
A = np.asarray(A)
if len(x.shape) != 1 or len(y.shape) != 1\
or A.shape[0:2] != (y.shape[0], x.shape[0]):
raise TypeError("Axes don't match array shape")
if len(A.shape) not in [2, 3]:
raise TypeError("Can only plot 2D or 3D data")
if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]:
raise TypeError("3D arrays must have three (RGB) or four (RGBA) color components")
if len(A.shape) == 3 and A.shape[2] == 1:
A.shape = A.shape[0:2]
if len(A.shape) == 2:
if A.dtype != np.uint8:
A = (self.cmap(self.norm(A))*255).astype(np.uint8)
self.is_grayscale = self.cmap.is_gray()
else:
A = np.repeat(A[:,:,np.newaxis], 4, 2)
A[:,:,3] = 255
self.is_grayscale = True
else:
if A.dtype != np.uint8:
A = (255*A).astype(np.uint8)
if A.shape[2] == 3:
                B = np.zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8)
B[:,:,0:3] = A
B[:,:,3] = 255
A = B
self.is_grayscale = False
self._A = A
self._Ax = x
self._Ay = y
self._imcache = None
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_interpolation(self, s):
        if s is not None and s not in ('nearest', 'bilinear'):
raise NotImplementedError('Only nearest neighbor and bilinear interpolations are supported')
AxesImage.set_interpolation(self, s)
def get_extent(self):
if self._A is None:
raise RuntimeError('Must set data first')
return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
def set_filternorm(self, s):
pass
def set_filterrad(self, s):
pass
def set_norm(self, norm):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
cm.ScalarMappable.set_norm(self, norm)
def set_cmap(self, cmap):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
        cm.ScalarMappable.set_cmap(self, cmap)
class PcolorImage(martist.Artist, cm.ScalarMappable):
'''
Make a pcolor-style plot with an irregular rectangular grid.
This uses a variation of the original irregular image code,
and it is used by pcolorfast for the corresponding grid type.
'''
def __init__(self, ax,
x=None,
y=None,
A=None,
cmap = None,
norm = None,
**kwargs
):
"""
cmap defaults to its rc setting
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.axes = ax
self._rgbacache = None
self.update(kwargs)
self.set_data(x, y, A)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
bg = (np.array(bg)*255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width = width * magnification
height = height * magnification
if self.check_update('array'):
A = self.to_rgba(self._A, alpha=self._alpha, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self.is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(self._Ax, self._Ay, A,
height,
width,
(vl.x0, vl.x1, vl.y0, vl.y1),
bg)
im.is_grayscale = self.is_grayscale
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.axes.bbox.xmin),
round(self.axes.bbox.ymin),
im,
self.axes.bbox.frozen(),
*self.get_transformed_clip_path_and_affine())
def set_data(self, x, y, A):
if not ma.isMA(A):
A = np.asarray(A)
if x is None:
x = np.arange(0, A.shape[1]+1, dtype=np.float64)
else:
x = np.asarray(x, np.float64).ravel()
if y is None:
y = np.arange(0, A.shape[0]+1, dtype=np.float64)
else:
y = np.asarray(y, np.float64).ravel()
if A.shape[:2] != (y.size-1, x.size-1):
print A.shape
print y.size
print x.size
raise ValueError("Axes don't match array shape")
if A.ndim not in [2, 3]:
raise ValueError("A must be 2D or 3D")
if A.ndim == 3 and A.shape[2] == 1:
A.shape = A.shape[:2]
self.is_grayscale = False
if A.ndim == 3:
if A.shape[2] in [3, 4]:
if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all():
self.is_grayscale = True
else:
raise ValueError("3D arrays must have RGB or RGBA as last dim")
self._A = A
self._Ax = x
self._Ay = y
self.update_dict['array'] = True
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self.update_dict['array'] = True
class FigureImage(martist.Artist, cm.ScalarMappable):
zorder = 1
def __init__(self, fig,
cmap = None,
norm = None,
offsetx = 0,
offsety = 0,
origin=None,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self.figure = fig
self.ox = offsetx
self.oy = offsety
self.update(kwargs)
self.magnification = 1.0
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
xmin, xmax, ymin, ymax = self.get_extent()
xdata, ydata = mouseevent.x, mouseevent.y
#print xdata, ydata, xmin, xmax, ymin, ymax
if xdata is not None and ydata is not None:
inside = xdata>=xmin and xdata<=xmax and ydata>=ymin and ydata<=ymax
else:
inside = False
return inside,{}
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def get_extent(self):
'get the image extent: left, right, bottom, top'
numrows, numcols = self.get_size()
return (-0.5+self.ox, numcols-0.5+self.ox,
-0.5+self.oy, numrows-0.5+self.oy)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x = self.to_rgba(self._A, self._alpha)
self.magnification = magnification
# if magnification is not one, we need to resize
ismag = magnification!=1
#if ismag: raise RuntimeError
if ismag:
isoutput = 0
else:
isoutput = 1
im = _image.fromarray(x, isoutput)
fc = self.figure.get_facecolor()
im.set_bg( *mcolors.colorConverter.to_rgba(fc, 0) )
im.is_grayscale = (self.cmap.name == "gray" and
len(self._A.shape) == 2)
if ismag:
numrows, numcols = self.get_size()
numrows *= magnification
numcols *= magnification
im.set_interpolation(_image.NEAREST)
im.resize(numcols, numrows)
if self.origin=='upper':
im.flipud_out()
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
# todo: we should be able to do some cacheing here
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.ox), round(self.oy), im, self.figure.bbox,
*self.get_transformed_clip_path_and_affine())
def write_png(self, fname):
"""Write the image to png file with fname"""
im = self.make_image()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def imread(fname):
"""
Return image file in *fname* as :class:`numpy.array`.
Return value is a :class:`numpy.array`. For grayscale images, the
return array is MxN. For RGB images, the return value is MxNx3.
For RGBA images the return value is MxNx4.
matplotlib can only read PNGs natively, but if `PIL
<http://www.pythonware.com/products/pil/>`_ is installed, it will
use it to load the image and return an array (if possible) which
can be used with :func:`~matplotlib.pyplot.imshow`.
TODO: support RGB and grayscale return values in _image.readpng
"""
def pilread():
'try to load the image with PIL or return None'
try: import Image
except ImportError: return None
image = Image.open( fname )
return pil_to_array(image)
handlers = {'png' :_png.read_png,
}
basename, ext = os.path.splitext(fname)
ext = ext.lower()[1:]
if ext not in handlers.keys():
im = pilread()
if im is None:
raise ValueError('Only know how to handle extensions: %s; with PIL installed matplotlib can handle more images' % handlers.keys())
return im
handler = handlers[ext]
return handler(fname)
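def _imread_example(fname):
    """Illustrative sketch (not part of the original module): load an image with
    imread and display it; the returned array follows the shape conventions
    documented above (MxN grayscale, MxNx3 RGB, MxNx4 RGBA)."""
    import matplotlib.pyplot as plt
    a = imread(fname)
    plt.imshow(a)
    return a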
def pil_to_array( pilImage ):
"""
load a PIL image and return it as a numpy array of uint8. For
grayscale images, the return array is MxN. For RGB images, the
return value is MxNx3. For RGBA images the return value is MxNx4
"""
def toarray(im):
'return a 1D array of floats'
x_str = im.tostring('raw',im.mode,0,-1)
x = np.fromstring(x_str,np.uint8)
return x
if pilImage.mode in ('RGBA', 'RGBX'):
im = pilImage # no need to convert images
elif pilImage.mode=='L':
        im = pilImage # no need to convert luminance images
# return MxN luminance array
x = toarray(im)
x.shape = im.size[1], im.size[0]
return x
elif pilImage.mode=='RGB':
#return MxNx3 RGB array
        im = pilImage # no need to convert RGB images
x = toarray(im)
x.shape = im.size[1], im.size[0], 3
return x
else: # try to convert to an rgba image
try:
im = pilImage.convert('RGBA')
except ValueError:
raise RuntimeError('Unknown image mode')
# return MxNx4 RGBA array
x = toarray(im)
x.shape = im.size[1], im.size[0], 4
return x
def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
preview=False):
"""
make a thumbnail of image in *infile* with output filename
*thumbfile*.
*infile* the image file -- must be PNG or PIL readable if you
have `PIL <http://www.pythonware.com/products/pil/>`_ installed
*thumbfile*
the thumbnail filename
*scale*
the scale factor for the thumbnail
*interpolation*
the interpolation scheme used in the resampling
*preview*
if True, the default backend (presumably a user interface
backend) will be used which will cause a figure to be raised
if :func:`~matplotlib.pyplot.show` is called. If it is False,
a pure image backend will be used depending on the extension,
'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPDF,
'svg'->FigureCanvasSVG
See examples/misc/image_thumbnail.py.
.. htmlonly::
:ref:`misc-image_thumbnail`
Return value is the figure instance containing the thumbnail
"""
basedir, basename = os.path.split(infile)
baseout, extout = os.path.splitext(thumbfile)
im = imread(infile)
rows, cols, depth = im.shape
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
height = float(rows)/dpi*scale
width = float(cols)/dpi*scale
extension = extout.lower()
if preview:
# let the UI backend do everything
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(width, height), dpi=dpi)
else:
if extension=='.png':
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
elif extension=='.pdf':
from matplotlib.backends.backend_pdf import FigureCanvasPDF as FigureCanvas
elif extension=='.svg':
from matplotlib.backends.backend_svg import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
from matplotlib.figure import Figure
fig = Figure(figsize=(width, height), dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
basename, ext = os.path.splitext(basename)
    ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
fig.savefig(thumbfile, dpi=dpi)
return fig
| agpl-3.0 |
ctogle/dilapidator | src/dilap/BROKEN/generate/landscape.py | 1 | 7203 | import dilap.core.base as db
import dilap.core.vector as dpv
import dilap.core.ray as dr
import dilap.core.bbox as dbb
import dilap.core.tools as dpr
import dilap.core.sgraph as dsg
import dilap.core.context as dgc
#import dilap.core.mesh as dms
import dilap.io.io as dio
import dilap.primitive.cube as dcu
import dilap.primitive.terrain as dt
#import dilap.core.tmesh as dtm
import dilap.mesh.piecewisecomplex as pwc
import dilap.mesh.tools as dtl
import random,pdb
import matplotlib.pyplot as plt
class landscape(dgc.context):
def __init__(self,*args,**kwargs):
dgc.context.__init__(self,*args,**kwargs)
self._def('controls',[],**kwargs)
self._def('holes',[],**kwargs)
self._def('regions',[],**kwargs)
self._def('sealevel',-0.5,**kwargs)
def search_offset_random(self,p1,p2):
return (2.0*random.random()-1.0)*dpv.distance(p1,p2)/8.0
def search_offset(self,p,p1,p2,w,thresh1 = 20,thresh2 = 100):
ccnt = len(self.controls)
if ccnt < 1:return self.search_offset_random(p1,p2)
controlx = dpv.find_closest_xy(p,self.controls,ccnt,5.0)
controld = dpv.distance_xy(p,self.controls[controlx])
if controld < thresh1:
z = self.controls[controlx].z-p.z
w.scale_u(0.0)
#self.controls.pop(controlx)
elif controld < thresh2:
rz = self.search_offset_random(p1,p2)
pz = self.controls[controlx].z-p.z
#z = pz+(rz-pz)*(controld/thresh2)**2
z = pz+(rz-pz)*(controld/thresh2)**3
else:z = self.search_offset_random(p1,p2)
return z
    # if there's a coord near p in ps, return its index
# else append and return the new index
def search(self,ps,ps0,p1,p2,w):
p = dpv.midpoint(p1,p2)
pfnd = p.nearest(ps0)
if not pfnd == -1:return pfnd
z = self.search_offset(p,p1,p2,w)
pcnt = len(ps)
ps.append((p,z,w))
ps0.append(p)
return pcnt
def split(self,ps,ws,ts):
level0 = []
level = []
after = []
mpt = dpv.midpoint
pcnt = len(ps)
for t in ts:
if ws[t[0]] == ws[t[1]]:w1 = ws[t[0]].copy()
else:w1 = dpv.one()
if ws[t[1]] == ws[t[2]]:w2 = ws[t[1]].copy()
else:w2 = dpv.one()
if ws[t[2]] == ws[t[0]]:w3 = ws[t[2]].copy()
else:w3 = dpv.one()
m1 = self.search(level,level0,ps[t[0]],ps[t[1]],w1)+pcnt
m2 = self.search(level,level0,ps[t[1]],ps[t[2]],w2)+pcnt
m3 = self.search(level,level0,ps[t[2]],ps[t[0]],w3)+pcnt
after.append((t[0],m1,m3))
after.append((m1,t[1],m2))
after.append((m3,m2,t[2]))
after.append((m1,m2,m3))
for l in level:
l[0].z += l[1]
ps.append(l[0])
ws.append(l[2])
return after
def _cover(self,radius = 100,tri_edgelength = 10,mod_edgelength = 250):
convexcover = dpr.pts_to_convex_xy(self.regions)
dpr.inflate(convexcover,radius)
pts,tris = dpr.triangle_cover(convexcover,mod_edgelength)
wts = [dpv.one() for x in pts]
self.center = dpv.center_of_mass(convexcover)
return pts,wts,tris,convexcover
def tmodels(self,splits = 3):
pts,wts,tris,convex = self._cover()
maxh = 200.0
dtbs = [dpv.distance_to_border_xy(p,convex) for p in pts]
mdtb = max(dtbs)
deltas = [(0.0 + maxh*(dtbs[x]/mdtb)**3)
if dpv.inside(pts[x],convex) else -20.0 for x in range(len(dtbs))]
for x in range(len(pts)):pts[x].translate_z(deltas[x])
for x in range(splits):tris = self.split(pts,wts,tris)
m = dtm.meshme(pts,None,None,wts,[],tris)
vbnd = [x for x in range(len(m.vs)) if len(m.vs[x].vring) < 6]
#dpr.plot_points([m.vs[x].p for x in vbnd])
for vbx in vbnd:
m.vs[vbx].w.scale_u(0.0)
m.vs[vbx].p.z = -20.0
return m
def generate(self,other = None,worn = 0):
random.seed(0)
m = self.tmodels()
m.smooths(100,0.1,method = 'uniform')
flatholes = dpr.flatten(self.holes)
hbb = dbb.bb_from_ps(flatholes)
hbb._consume_x(dpv.vector2d(hbb.x.x-5.0,hbb.x.y+5.0))
hbb._consume_y(dpv.vector2d(hbb.y.x-5.0,hbb.y.y+5.0))
hbb._consume_z(dpv.vector2d(-1000.0,1000.0))
mps = m.gpdata()
mfs = m.intersect_aaabbb(hbb)
#ax = dtl.plot_axes_xy()
#for hdx in range(len(self.holes)):
# hole = self.holes[hdx]
# dtl.plot_polygon_xy(hole,ax)
#plt.show()
cutrng = []
for mfx in range(len(mfs)):
mfps = m.gfpdat(mfs[mfx])
for hdx in range(len(self.holes)):
hole = self.holes[hdx]
isect = 1 - dpv.separating_axis(mfps,hole)
if isect:
if not mfx in cutrng:
cutrng.append(mfs[mfx])
hbnd = m.cut_hole(cutrng)
hvs = []
for h in hbnd:
m.vs[h].w.scale_u(0.0)
hvs.append(m.vs[h])
#bns = [dpv.zhat.copy() for x in flatholes]
#bus = [dpv.zero2d() for x in flatholes]
#bws = [dpv.one() for x in flatholes]
#fvs = m.newvdata(flatholes,bns,bus,bws)
#pdb.set_trace()
holecover = pwc.model_plc(points = [m.vs[h].p for h in hbnd])
#dpr.plot_points([m.vs[h].p for h in hbnd],edges = False)
#for hx in hbnd:
# m.vs[hx].p.z = 100
# for hxx in m.vs[hx].vring:m.vs[hxx].p.z = 100
#loops = m.looprank(hbnd)
#dvs = loops[0][0] + fvs
#dvs = hbnd[:]
#patch = m.delaunaymethod(dvs)
#flatpatch = m.flatten(patch,dpv.vector(0,0,100),dpv.zhat.copy())
'''#
pfaces = [(m.vs[x].p,m.vs[y].p,m.vs[z].p) for x,y,z in m.fs]
mbb = dcu.cube().scale_u(100).scale_z(10)._aaabbb()
mps = m.gpdata()
hitfaces = dbb.intersect_tri_filter(mbb,m.fs,mps)
#hbnd = m.order_loop(m.cut_hole(hitfaces))
hbnd = m.cut_hole(hitfaces)
hbps = [m.vs[hx].p for hx in hbnd]
v = dpv.vector(0,0,50)
bps = dpv.translate_coords(dpr.dice_edges(dpr.corners(80,60),3),v)
#bps.reverse()
bns = [dpv.zhat.copy() for x in bps]
bus = [dpv.zero2d() for x in bps]
bws = [dpv.one() for x in bps]
bvs = m.newvdata(bps,bns,bus,bws)
for bvx in range(len(bvs)):m.vs[bvx].w.scale_u(0.0)
bvsloop = [(bvs[x-1],bvs[x]) for x in range(len(bvs))]
for e in bvsloop:m.vs[e[0]].edge(e[1])
#patch = m.advfrontmethod(hbnd)
#patch = m.advfrontmethod(bvs)
#patch = m.afm_bridge(hbnd,bvs)
#patch = m.delaunay_bridge(hbnd,bvs)
#patch = m.delaunaymethod(hbnd)
patch = m.delaunaymethod(bvs)
flatpatch = m.flatten(patch,dpv.vector(0,0,50),dpv.zhat.copy())
'''#
#pdb.set_trace()
self.landmd = m.pelt()
self.landmd._consume(holecover)
self.landbb = self.landmd._aaabbb()
self._models_to_graph(self.landmd)
return self
| mit |
matthewshoup/airflow | airflow/www/app.py | 3 | 68914 | from __future__ import print_function
from __future__ import division
from builtins import str
from past.utils import old_div
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
import time
from flask._compat import PY2
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
try:
# Environment specific login
import airflow_login as login
except ImportError:
logging.error(
"authenticate is set to True in airflow.cfg, "
"but airflow_login failed to import")
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
login_required = lambda x: x
FILTER_BY_OWNER = False
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = AUTHENTICATE
class VisiblePasswordInput(widgets.PasswordInput):
def __init__(self, hide_value=False):
self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
widget = VisiblePasswordInput()
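# VisiblePasswordField is wired in below via ConnectionModelView.form_overrides
# so connection passwords are rendered as visible text in the admin form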
def superuser_required(f):
'''
Decorator for views requiring superuser access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
):
return f(*args, **kwargs)
else:
flash("This page requires superuser privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
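# Row caps: ad-hoc queries and chart queries are wrapped with wwwutils.limit_sql
# using these limits, and the views warn when a result set hits the cap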
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: pygment_html_render(
inspect.getsource(x), lexers.PythonLexer),
}
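# attr_renderer maps task attribute names to HTML renderers (pygments lexers or
# markdown); it is used by the rendered-template and task-details views below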
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
'ck_page', __name__,
static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
return {
'hostname': socket.gethostname(),
}
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
class TreeForm(Form):
base_date = DateTimeField(
"Anchor date", widget=DateTimePickerWidget(), default=datetime.now())
num_runs = SelectField("Number of runs", default=25, choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
))
@app.route('/')
def index():
return redirect(url_for('admin.index'))
@app.route('/health')
def health():
""" We can add an array of tests here to check the server's health """
content = Markup(markdown.markdown("The server is healthy!"))
return content
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active, DM.owners == current_user.username).all()
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {dag.dag_id: dag for dag in dags if (dag.owner == current_user.username and (not dag.parent_dag))}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
admin = Admin(
app,
name="Airflow",
index_view=HomeView(name="DAGs"),
template_mode='bootstrap3')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return Response(
response=json.dumps(
payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return Response(
response=json.dumps(payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@app.errorhandler(404)
def circles(self):
return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {k: v for k, v in request.headers}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
return Response(
response=json.dumps(d, indent=4),
status=200, mimetype="application/json")
@expose('/login')
def login(self):
return login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/action')
@login_required
def action(self):
action = request.args.get('action')
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
if action == "run":
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
elif action == 'clear':
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
elif action == 'success':
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date == execution_date,
TI.task_id.in_(task_ids)).all()
if confirmed:
updated_task_ids = []
for ti in tis:
updated_task_ids.append(ti.task_id)
ti.state = State.SUCCESS
session.commit()
to_insert = list(set(task_ids) - set(updated_task_ids))
for task_id in to_insert:
ti = TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(task_ids)))
return redirect(origin)
else:
if not task_ids:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id in task_ids:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if not base_date:
base_date = dag.latest_execution_date or datetime.now()
else:
base_date = dateutil.parser.parse(base_date)
base_date = utils.round_time(base_date, dag.schedule_interval)
form = TreeForm(data={'base_date': base_date, 'num_runs': num_runs})
start_date = dag.start_date
if not start_date and 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
if start_date:
difference = base_date - start_date
offset = timedelta(seconds=int(difference.total_seconds() % dag.schedule_interval.total_seconds()))
base_date -= offset
base_date -= timedelta(microseconds=base_date.microsecond)
from_date = (base_date - (num_runs * dag.schedule_interval))
dates = utils.date_range(
from_date, base_date, dag.schedule_interval)
task_instances = {}
for ti in dag.get_task_instances(session, from_date):
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / len(dag.roots)
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
utils.alchemy_to_dict(
task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
if len(dag.roots) > 1:
# d3 likes a single root
data = {
'name': 'root',
'instances': [],
'children': [recurse_nodes(t, set()) for t in dag.roots]
}
elif len(dag.roots) == 1:
data = recurse_nodes(dag.roots[0], set())
else:
flash("No tasks found.", "error")
data = []
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)
}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
data.append([
ti.execution_date.isoformat(), old_div((
ti.end_date - (
ti.execution_date + task.schedule_interval)
).total_seconds(),(60*60))
])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes="table table-bordered table-striped no-wrap",
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
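# Column formatter helpers for the Flask-Admin views below; each receives
# (view, context, model, name) and returns the markup/value for one list cell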
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_f(v, c, m, p):
color = State.color(m.state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{m.state}</span>'.format(**locals()))
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
mv = LogModelView(
models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queued_dttm', 'log')
can_delete = True
page_size = 500
mv = TaskInstanceModelView(
models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
form_overrides = dict(_password=VisiblePasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
# Used to customized the form, the forms elements get rendered
# and results are stored in the extra field as json. All of these
# need to be prefixed with extra__ and then the conn_type ___ as in
# extra__{conn_type}__name. You can also hide form elements and rename
# others from the connection_form.js file
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def is_secure(self):
"""
Used to display a message in the Connection list view making it clear
that the passwords can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
mv = ConnectionModelView(
models.Connection, Session,
name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if AUTHENTICATE and not model.user_id and current_user:
model.user_id = current_user.id
model.last_modified = datetime.now()
mv = ChartModelView(
models.Chart, Session,
name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
base.MenuLink(
category='Docs',
name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(
category='Docs',
name='Github',
url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
mv = KnowEventView(
models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
mv = VariableView(
models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
mv = SlaMissModelView(
models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
admin.add_view(v)
for bp in flask_blueprints:
print(bp)
app.register_blueprint(bp)
for ml in menu_links:
admin.add_link(ml)
integrate_plugins()
| apache-2.0 |
void32/mpld3 | examples/random_walk.py | 19 | 2303 | """
Visualizing Random Walks
========================
This shows the use of transparent lines to visualize random walk data.
There is also a custom plugin defined which causes lines to be highlighted
when the mouse hovers over them.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import jinja2
import json
import numpy as np
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins, utils
class HighlightLines(plugins.PluginBase):
"""A plugin to highlight lines on hover"""
JAVASCRIPT = """
mpld3.register_plugin("linehighlight", LineHighlightPlugin);
LineHighlightPlugin.prototype = Object.create(mpld3.Plugin.prototype);
LineHighlightPlugin.prototype.constructor = LineHighlightPlugin;
LineHighlightPlugin.prototype.requiredProps = ["line_ids"];
LineHighlightPlugin.prototype.defaultProps = {alpha_bg:0.3, alpha_fg:1.0}
function LineHighlightPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHighlightPlugin.prototype.draw = function(){
for(var i=0; i<this.props.line_ids.length; i++){
var obj = mpld3.get_element(this.props.line_ids[i], this.fig),
             alpha_fg = this.props.alpha_fg,
alpha_bg = this.props.alpha_bg;
obj.elements()
.on("mouseover", function(d, i){
d3.select(this).transition().duration(50)
.style("stroke-opacity", alpha_fg); })
.on("mouseout", function(d, i){
d3.select(this).transition().duration(200)
.style("stroke-opacity", alpha_bg); });
}
};
"""
def __init__(self, lines):
self.lines = lines
self.dict_ = {"type": "linehighlight",
"line_ids": [utils.get_id(line) for line in lines],
"alpha_bg": lines[0].get_alpha(),
"alpha_fg": 1.0}
N_paths = 50
N_steps = 100
x = np.linspace(0, 10, 100)
y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
y = y.cumsum(1)
fig, ax = plt.subplots(subplot_kw={'xticks': [], 'yticks': []})
lines = ax.plot(x, y.T, color='blue', lw=4, alpha=0.1)
plugins.connect(fig, HighlightLines(lines))
mpld3.show()
| bsd-3-clause |
raman-sharma/pyAudioAnalysis | data/testComputational.py | 5 | 3609 | import sys
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
import matplotlib.pyplot as plt
import time
nExp = 4
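# Each branch below times one pyAudioAnalysis operation over nExp runs and
# reports throughput as a multiple of the audio clip's real-time duration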
def main(argv):
if argv[1] == "-shortTerm":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
t2 = time.clock()
perTime1 = duration / (t2-t1); print "short-term feature extraction: {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-classifyFile":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
aT.fileClassification("diarizationExample.wav", "svmSM","svm")
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-mtClassify":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
[flagsInd, classesAll, acc] = aS.mtFileClassification("diarizationExample.wav", "svmSM", "svm", False, '')
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-hmmSegmentation":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
aS.hmmSegmentation('diarizationExample.wav', 'hmmRadioSM', False, '')
t2 = time.clock()
perTime1 = duration / (t2-t1); print "HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-silenceRemoval":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
segments = aS.silenceRemoval(x, Fs, 0.050, 0.050, smoothWindow = 1.0, Weight = 0.3, plot = False)
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Silence removal \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-thumbnailing":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("scottish.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
[A1, A2, B1, B2, Smatrix] = aS.musicThumbnailing(x1, Fs1, 1.0, 1.0, 15.0) # find thumbnail endpoints
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Thumbnail \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-diarization-noLDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
aS.speakerDiarization("diarizationExample.wav", 4, LDAdim = 0, PLOT = False)
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-diarization-LDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
aS.speakerDiarization("diarizationExample.wav", 4, PLOT = False)
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
rupak0577/ginga | ginga/examples/matplotlib/example3_mpl.py | 2 | 15075 | #! /usr/bin/env python
#
# example3_mpl.py -- Copy attributes from a Ginga Qt widget into a Matplotlib
# figure.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
#
"""
$ ./example3_mpl.py [fits file]
example3 displays a native ginga widget beside a matplotlib figure as two
panes. A fits file can be dropped into the left pane and manipulated using
the standard Ginga interactive controls
(see http://ginga.readthedocs.io/en/latest/quickref.html).
Drop down boxes allow the color map to be changed.
The right pane has two buttons under it: pressing each button sets up a
different kind of plot in the mpl pane based on the current state of the
ginga pane.
You need Qt4 with python bindings (or pyside) installed to run this example.
"""
from __future__ import print_function
import sys
import numpy
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from ginga.qtw import ColorBar
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga import AstroImage
from ginga import cmap, imap
from ginga.misc import log
from ginga.util.six.moves import map, zip
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.colors import ListedColormap
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(QtGui.QMainWindow):
def __init__(self, logger):
super(FitsViewer, self).__init__()
self.logger = logger
menubar = self.menuBar()
# create a File pulldown menu, and add it to the menu bar
filemenu = menubar.addMenu("File")
item = QtGui.QAction("Open File", menubar)
item.triggered.connect(self.open_file)
filemenu.addAction(item)
sep = QtGui.QAction(menubar)
sep.setSeparator(True)
filemenu.addAction(sep)
item = QtGui.QAction("Quit", menubar)
item.triggered.connect(self.close)
filemenu.addAction(item)
# Add matplotlib color maps to our built in ones
cmap.add_matplotlib_cmaps()
self.cmaps = cmap.get_names()
self.imaps = imap.get_names()
wd, ht = 500, 500
# Create a Ginga widget
fi = ImageViewCanvas(logger, render='widget')
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.enable_draw(False)
fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
# enable various key and mouse controlled actions
bd = fi.get_bindings()
bd.enable_all(True)
self.cp_tag = 'compass'
# pack widget into layout
gingaw = fi.get_widget()
gingaw.resize(wd, ht)
vbox1 = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
layout.addWidget(gingaw, stretch=1)
self.cm = cmap.get_cmap('gray')
self.im = imap.get_imap('ramp')
# add color bar
rgbmap = fi.get_rgbmap()
rgbmap.set_hash_size(256)
cbar = ColorBar.ColorBar(self.logger, rgbmap=rgbmap,
link=True)
cbar.resize(-1, 15)
#cbar.show()
self.colorbar = cbar
layout.addWidget(cbar, stretch=0)
settings = fi.get_settings()
settings.getSetting('cuts').add_callback('set',
self.change_range_cb, fi,
self.colorbar)
# color map selection widget
wcmap = QtGui.QComboBox()
for name in self.cmaps:
wcmap.addItem(name)
index = self.cmaps.index('gray')
wcmap.setCurrentIndex(index)
wcmap.activated.connect(self.set_cmap_cb)
self.wcmap = wcmap
# intensity map selection widget
wimap = QtGui.QComboBox()
for name in self.imaps:
wimap.addItem(name)
index = self.imaps.index('ramp')
wimap.setCurrentIndex(index)
wimap.activated.connect(self.set_cmap_cb)
self.wimap = wimap
#wopen = QtGui.QPushButton("Open File")
#wopen.clicked.connect(self.open_file)
# add buttons to layout
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
hbox.addStretch(1)
for w in (wcmap, wimap):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
layout.addWidget(hw, stretch=0)
vbox1.setLayout(layout)
# Create a matplotlib Figure
#self.fig = matplotlib.figure.Figure(figsize=(wd, ht))
self.fig = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.fig)
vbox2 = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
# scrw = QtGui.QScrollArea()
# scrw.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
# scrw.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
# scrw.setWidgetResizable(True)
# layout.addWidget(scrw, stretch=1)
# scrw.setWidget(self.canvas)
layout.addWidget(self.canvas, stretch=1)
# Add matplotlib buttons
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
wgetimg = QtGui.QPushButton("Get Data")
wgetimg.clicked.connect(self.get_image)
wgetrgb = QtGui.QPushButton("Get RGB")
wgetrgb.clicked.connect(self.get_rgb_image)
#wquit = QtGui.QPushButton("Quit")
#wquit.clicked.connect(self.close)
hbox.addStretch(1)
for w in (wgetimg, wgetrgb):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
layout.addWidget(hw, stretch=0)
vbox2.setLayout(layout)
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
vbox.setSpacing(1)
w = QtGui.QWidget()
layout = QtGui.QHBoxLayout()
        layout.addWidget(vbox1, stretch=1)
        layout.addWidget(vbox2, stretch=1)
w.setLayout(layout)
vbox.addWidget(w, stretch=1)
self.readout = QtGui.QLabel("")
vbox.addWidget(self.readout, stretch=0,
alignment=QtCore.Qt.AlignCenter)
vw = QtGui.QWidget()
vw.setLayout(vbox)
self.setCentralWidget(vw)
def set_cmap_cb(self, kind):
index = self.wcmap.currentIndex()
cmap_name = self.cmaps[index]
self.cm = cmap.get_cmap(cmap_name)
index = self.wimap.currentIndex()
imap_name = self.imaps[index]
self.im = imap.get_imap(imap_name)
self.fitsimage.set_cmap(self.cm)
self.fitsimage.set_imap(self.im)
def change_range_cb(self, setting, value, fitsimage, cbar):
loval, hival = value
cbar.set_range(loval, hival)
def clear_canvas(self):
self.fitsimage.deleteAllObjects()
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.setWindowTitle(filepath)
# create compass
try:
try:
self.fitsimage.deleteObjectByTag(self.cp_tag)
except KeyError:
pass
(x, y, xn, yn, xe, ye) = image.calc_compass_center()
self.logger.debug("x=%d y=%d xn=%d yn=%d xe=%d ye=%d" % (
x, y, xn, yn, xe, ye))
Compass = self.fitsimage.getDrawClass('compass')
self.fitsimage.add(Compass(
x, y, xn, yn, xe, ye, color='skyblue',
fontsize=14), tag=self.cp_tag)
except Exception as e:
self.logger.warning("Can't calculate compass: %s" % (
str(e)))
def open_file(self):
res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
#print(fileName)
self.load_file(fileName)
def closeEvent(self, ce):
self.close()
def motion(self, fitsimage, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = fitsimage.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = fitsimage.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.setText(text)
def calculate_aspect(self, shape, extent):
dx = abs(extent[1] - extent[0]) / float(shape[1])
dy = abs(extent[3] - extent[2]) / float(shape[0])
return dx / dy
def make_mpl_colormap(self, fitsimage):
# make the equivalent color map for matplotlib
# (takes into account any cmap warps, etc.)
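        # Feed the indices 0..255 through the Ginga RGB map and scale the resulting
        # 256x3 array into [0, 1] so ListedColormap can use it directly.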
idx = numpy.array(list(range(256)), dtype='uint')
idx = idx.reshape((256, 1))
rgbmap = fitsimage.get_rgbmap()
carr = rgbmap.get_rgbarray(idx).get_array('RGB').astype('float')
carr = carr / 256.0
carr = carr.reshape((256, 3))
cm = ListedColormap(carr, N=256)
return cm
def get_wcs_extent(self, image, x0, y0, x1, y1):
# WCS of the area
ra0, dec0 = image.pixtoradec(x0, y0, format='deg', coords='data')
ra1, dec1 = image.pixtoradec(x1, y1, format='deg', coords='data')
extent = (ra0, ra1, dec0, dec1)
return extent
def get_rgb_image(self):
fi = self.fitsimage
# clear previous image
self.fig.clf()
# Grab the RGB array for the current image and place it in the
# matplotlib figure axis
arr = fi.getwin_array(order='RGB')
# force aspect ratio of figure to match
wd, ht = fi.get_window_size()
# Get the data extents
x0, y0 = fi.get_data_xy(0, 0)
x1, y1 = fi.get_data_xy(wd-1, ht-1)
flipx, flipy, swapxy = fi.get_transforms()
if swapxy:
x0, x1, y0, y1 = y0, y1, x0, x1
xlabel = 'dec'
ylabel = 'ra'
else:
xlabel = 'ra'
ylabel = 'dec'
#extent = (x0, x1, y1, y0)
image = fi.get_image()
extent = self.get_wcs_extent(image, x0, x1, y1, y0)
#print "extent=%s" % (str(extent))
# Calculate aspect ratio
aspect = self.calculate_aspect(arr.shape, extent)
#ax = self.fig.add_subplot(111, adjustable='box', aspect=aspect)
ax = self.fig.add_subplot(111)
ax.autoscale(True, tight=True)
ax.set_anchor('C')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# make the equivalent color map for matplotlib
cm = self.make_mpl_colormap(fi)
img = ax.imshow(arr, interpolation="nearest", origin="upper",
vmin=0, vmax=255,
extent=extent,
aspect=aspect)
# force an update of the figure
self.fig.canvas.draw()
def get_image(self):
fi = self.fitsimage
# clear previous image
self.fig.clf()
ax = self.fig.add_subplot(111)
ax.autoscale(True, tight=True)
x0, y0, x1, y1 = tuple(map(int, fi.get_datarect()))
#extent = (x0, x1, y0, y1)
image = fi.get_image()
arr = image.cutout_data(x0, y0, x1, y1)
extent = self.get_wcs_extent(image, x0, y0, x1, y1)
# get cut levels
loval, hival = fi.get_cut_levels()
# make the equivalent color map for matplotlib
cm = self.make_mpl_colormap(fi)
# add the image to the figure
interp = 'nearest'
img = ax.imshow(arr, interpolation=interp, origin="lower",
vmin=loval, vmax=hival, cmap=cm,
aspect="equal", extent=extent)
# add a colorbar
self.fig.colorbar(img, orientation='vertical')
# force an update of the figure
self.fig.canvas.draw()
def main(options, args):
QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication(args)
app.connect(app, QtCore.SIGNAL('lastWindowClosed()'),
app, QtCore.SLOT('quit()'))
logger = log.get_logger(name="example3", options=options)
w = FitsViewer(logger)
w.resize(1024, 540)
w.show()
app.setActiveWindow(w)
w.raise_()
w.activateWindow()
if len(args) > 0:
w.load_file(args[0])
app.exec_()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=None,
help="Set logging level to LEVEL")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/tricontour_vs_griddata.py | 6 | 1449 | """
Comparison of griddata and tricontour for an unstructured triangular grid.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
import time
seed(0)
npts = 200
ngridx = 100
ngridy = 200
x = uniform(-2,2,npts)
y = uniform(-2,2,npts)
z = x*np.exp(-x**2-y**2)
# griddata and contour.
start = time.clock()
plt.subplot(211)
xi = np.linspace(-2.1,2.1,ngridx)
yi = np.linspace(-2.1,2.1,ngridy)
zi = griddata(x,y,z,xi,yi,interp='linear')
plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
plt.contourf(xi,yi,zi,15,cmap=plt.cm.rainbow,
             norm=plt.Normalize(vmax=abs(zi).max(), vmin=-abs(zi).max()))
plt.colorbar() # draw colorbar
plt.plot(x, y, 'ko', ms=3)
plt.xlim(-2,2)
plt.ylim(-2,2)
plt.title('griddata and contour (%d points, %d grid points)' % (npts, ngridx*ngridy))
print ('griddata and contour seconds: %f' % (time.clock() - start))
# tricontour.
start = time.clock()
plt.subplot(212)
triang = tri.Triangulation(x, y)
plt.tricontour(x, y, z, 15, linewidths=0.5, colors='k')
plt.tricontourf(x, y, z, 15, cmap=plt.cm.rainbow,
                norm=plt.Normalize(vmax=abs(zi).max(), vmin=-abs(zi).max()))
plt.colorbar()
plt.plot(x, y, 'ko', ms=3)
plt.xlim(-2,2)
plt.ylim(-2,2)
plt.title('tricontour (%d points)' % npts)
print ('tricontour seconds: %f' % (time.clock() - start))
plt.show()
| mit |
EthanMacdonald/h-DQN | run.py | 1 | 4491 | import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
from envs.mdp import StochasticMDPEnv
from agent.hDQN import hDQN
plt.style.use('ggplot')
def one_hot(state):
vector = np.zeros(6)
vector[state-1] = 1.0
return np.expand_dims(vector, axis=0)
def main():
ActorExperience = namedtuple("ActorExperience", ["state", "goal", "action", "reward", "next_state", "done"])
MetaExperience = namedtuple("MetaExperience", ["state", "goal", "reward", "next_state", "done"])
env = StochasticMDPEnv()
agent = hDQN()
visits = np.zeros((12, 6))
anneal_factor = (1.0-0.1)/12000
print("Annealing factor: " + str(anneal_factor))
for episode_thousand in range(12):
for episode in range(1000):
print("\n\n### EPISODE " + str(episode_thousand*1000 + episode) + "###")
state = env.reset()
visits[episode_thousand][state-1] += 1
done = False
while not done:
goal = agent.select_goal(one_hot(state))
agent.goal_selected[goal-1] += 1
print("\nNew Goal: " + str(goal) + "\nState-Actions: ")
total_external_reward = 0
goal_reached = False
while not done and not goal_reached:
action = agent.select_move(one_hot(state), one_hot(goal), goal)
print(str((state,action)) + "; ")
next_state, external_reward, done = env.step(action)
visits[episode_thousand][next_state-1] += 1
intrinsic_reward = agent.criticize(goal, next_state)
goal_reached = next_state == goal
if goal_reached:
agent.goal_success[goal-1] += 1
print("Goal reached!! ")
if next_state == 6:
print("S6 reached!! ")
exp = ActorExperience(one_hot(state), one_hot(goal), action, intrinsic_reward, one_hot(next_state), done)
agent.store(exp, meta=False)
agent.update(meta=False)
agent.update(meta=True)
total_external_reward += external_reward
state = next_state
exp = MetaExperience(one_hot(state), one_hot(goal), total_external_reward, one_hot(next_state), done)
agent.store(exp, meta=True)
#Annealing
agent.meta_epsilon -= anneal_factor
avg_success_rate = agent.goal_success[goal-1] / agent.goal_selected[goal-1]
if(avg_success_rate == 0 or avg_success_rate == 1):
agent.actor_epsilon[goal-1] -= anneal_factor
else:
agent.actor_epsilon[goal-1] = 1- avg_success_rate
if(agent.actor_epsilon[goal-1] < 0.1):
agent.actor_epsilon[goal-1] = 0.1
print("meta_epsilon: " + str(agent.meta_epsilon))
print("actor_epsilon " + str(goal) + ": " + str(agent.actor_epsilon[goal-1]))
if (episode % 100 == 99):
print("")
print(str(visits/1000) + "")
eps = list(range(1,13))
    # Plot the visit frequency of each state S1..S6 in a 2x3 grid
    for s in range(6):
        plt.subplot(2, 3, s+1)
        plt.plot(eps, visits[:, s]/1000)
        plt.xlabel("Episodes (*1000)")
        plt.ylim(-0.01, 2.0)
        plt.xlim(1, 12)
        plt.title("S%d" % (s+1))
        plt.grid(True)
plt.savefig('first_run.png')
plt.show()
if __name__ == "__main__":
main()
| mit |
mtb-za/fatiando | cookbook/seismic_wavefd_elastic_sh.py | 9 | 2247 | """
Seismic: 2D finite difference simulation of elastic SH wave propagation
"""
import numpy as np
from matplotlib import animation
from fatiando import gridder
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (150, 150)
area = [0, 60000, 0, 60000]
# Make a density and S wave velocity model
density = 2400 * np.ones(shape)
velocity = 3700
mu = wavefd.lame_mu(velocity, density)
# Make a wave source from a mexican hat wavelet
sources = [wavefd.MexHatSource(30000, 15000, area, shape, 100, 1, delay=2)]
# Get the iterator for the simulation
dt = wavefd.maxdt(area, shape, velocity)
duration = 20
maxit = int(duration / dt)
stations = [[50000, 0]] # x, z coordinate of the seismometer
snapshot = int(0.5 / dt) # Plot a snapshot of the simulation every 0.5 seconds
simulation = wavefd.elastic_sh(mu, density, area, dt, maxit, sources, stations,
snapshot, padding=50, taper=0.01)
# This part makes an animation using matplotlibs animation API
fig = mpl.figure(figsize=(14, 5))
ax = mpl.subplot(1, 2, 2)
mpl.title('Wavefield')
# Start with everything zero and grab the plot so that it can be updated later
wavefield_plt = mpl.imshow(np.zeros(shape), extent=area, vmin=-10 ** (-5),
vmax=10 ** (-5), cmap=mpl.cm.gray_r)
mpl.points(stations, '^b')
mpl.xlim(area[:2])
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.subplot(1, 2, 1)
seismogram_plt, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-10 ** (-4), 10 ** (-4))
mpl.xlabel('time (s)')
mpl.ylabel('Amplitude')
times = np.linspace(0, duration, maxit)
# Update the plot everytime the simulation yields
def animate(i):
"""
    Grab the iteration number, displacement panel and seismograms
"""
t, u, seismograms = simulation.next()
mpl.title('time: %0.1f s' % (times[t]))
wavefield_plt.set_array(u[::-1]) # Revert the z axis so that 0 is top
seismogram_plt.set_data(times[:t + 1], seismograms[0][:t + 1])
return wavefield_plt, seismogram_plt
anim = animation.FuncAnimation(
    fig, animate, frames=maxit // snapshot, interval=1)
# anim.save('sh_wave.mp4', fps=10, dpi=200, bitrate=4000)
mpl.show()
| bsd-3-clause |
EricssonResearch/scott-eu | simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/rl_environment.py | 1 | 28605 | #!/usr/bin/env python
import rospy
import actionlib
import random
import vrep
import time
import numpy as np
import geometry_msgs.msg
import std_msgs.msg
import os
from geometry_msgs.msg import Twist, Pose
from kobuki_msgs.msg import BumperEvent
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import LaserScan,Imu
from std_msgs.msg import Float64
from turtlebot2i_safety.msg import SafetyZone, VelocityScale, SafetyRisk
from collections import deque
from math import pi, sqrt, sin, cos, radians, atan2
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from shapely.geometry import Polygon, box, LineString, Point
from shapely.affinity import translate
from matplotlib import pyplot as plt
class VrepManipulation():
def __init__(self):
self.scenarioNr = 0
self.clientID = vrep.simxStart('127.0.0.1', 20001, True, True, 5000, 5)
self.dirPath = os.path.dirname(os.path.realpath(__file__))
self.model_location = self.dirPath.replace('turtlebot2i_safety/src', 'turtlebot2i_description/v-rep_model/warehouse_scene/vrep_models/turtlebot2i_for_training.ttm')
returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox0 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#0', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox1 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#1', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox2 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#2', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox3 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#3', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox4 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#4', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox5 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#5', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox6 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#6', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox7 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#7', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox8 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#8', vrep.simx_opmode_blocking)
returnCode, self.ConcreteBox9 = vrep.simxGetObjectHandle(self.clientID, 'ConcreteBox#9', vrep.simx_opmode_blocking)
returnCode, self.round1 = vrep.simxGetObjectHandle(self.clientID, '80cmHighPillar100cm', vrep.simx_opmode_blocking)
returnCode, self.round2 = vrep.simxGetObjectHandle(self.clientID, '80cmHighPillar100cm0', vrep.simx_opmode_blocking)
returnCode, self.conv = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt', vrep.simx_opmode_blocking)
returnCode, self.conv0 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#0', vrep.simx_opmode_blocking)
returnCode, self.conv1 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#1', vrep.simx_opmode_blocking)
returnCode, self.conv2 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#2', vrep.simx_opmode_blocking)
returnCode, self.conv3 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#3', vrep.simx_opmode_blocking)
returnCode, self.conv4 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#4', vrep.simx_opmode_blocking)
returnCode, self.conv5 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#5', vrep.simx_opmode_blocking)
returnCode, self.conv6 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#6', vrep.simx_opmode_blocking)
returnCode, self.conv7 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#7', vrep.simx_opmode_blocking)
returnCode, self.conv8 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#8', vrep.simx_opmode_blocking)
returnCode, self.conv9 = vrep.simxGetObjectHandle(self.clientID, 'ConveyorBelt#9', vrep.simx_opmode_blocking)
def setScenarioOriginal(self):
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox, -1, np.array([ 7.0, 4.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox0, -1, np.array([ 1.0, 7.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox1, -1, np.array([-2.0,-3.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox2, -1, np.array([-3.0,-4.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox3, -1, np.array([ 5.5, 0.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox4, -1, np.array([ 5.5,-4.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox5, -1, np.array([ 7.0,-2.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox6, -1, np.array([7.25,-4.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox7, -1, np.array([ 9.0,-4.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox8, -1, np.array([ 7.5,-6.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox9, -1, np.array([-7.0,-6.0, 0.5]), vrep.simx_opmode_oneshot_wait)
#Round object
returnCode = vrep.simxSetObjectPosition(self.clientID, self.round1, -1, np.array([ 2.0,-4.0, 0.35]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.round2, -1, np.array([ 2.0,-6.5, 0.35]), vrep.simx_opmode_oneshot_wait)
#conveyor belt
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv, -1, np.array([ 1.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv0, -1, np.array([-1.0,-0.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv1, -1, np.array([-3.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv2, -1, np.array([-5.0,-0.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv3, -1, np.array([-7.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv4, -1, np.array([-4.5,-6.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv5, -1, np.array([ 0.0, 4.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv6, -1, np.array([-9.0, 5.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv7, -1, np.array([-9.0,-3.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv8, -1, np.array([-4.0, 5.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv9, -1, np.array([-7.0, 3.0, 0.113]), vrep.simx_opmode_oneshot_wait)
def setScenarioMove1(self):
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox, -1, np.array([ 6.5, 4.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox0, -1, np.array([-0.5, 7.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox1, -1, np.array([-4.0,-6.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox2, -1, np.array([-2.0,-2.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox3, -1, np.array([ 7.0, 0.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox4, -1, np.array([ 5.5,-6.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox5, -1, np.array([ 8.0,-2.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox6, -1, np.array([ 6.0,-3.0, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox7, -1, np.array([ 8.0,-5.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox8, -1, np.array([ 7.0,-4.5, 0.5]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.ConcreteBox9, -1, np.array([-8.5,-3.0, 0.5]), vrep.simx_opmode_oneshot_wait)
#Round object
returnCode = vrep.simxSetObjectPosition(self.clientID, self.round1, -1, np.array([ 0.0,-3.0, 0.35]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.round2, -1, np.array([ 4.0,-6.0, 0.35]), vrep.simx_opmode_oneshot_wait)
#conveyor belt
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv, -1, np.array([ 2.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv0, -1, np.array([-2.0,-0.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv1, -1, np.array([-4.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv2, -1, np.array([-6.0,-0.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv3, -1, np.array([-8.0, 1.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv4, -1, np.array([-4.5,-4.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv5, -1, np.array([-4.0, 4.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv6, -1, np.array([-7.0, 2.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv7, -1, np.array([-9.0,-5.0, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv8, -1, np.array([0.25, 4.5, 0.113]), vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxSetObjectPosition(self.clientID, self.conv9, -1, np.array([-9.0, 5.0, 0.113]), vrep.simx_opmode_oneshot_wait)
def reset_robot_pos(self):
#reset robot position to origin
returnCode = vrep.simxRemoveModel(self.clientID, self.robot_handle, vrep.simx_opmode_oneshot_wait)
#print("Removing robot, robot handler:",self.robot_handle," | return code:",returnCode)
returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
while(returnCode == 0):
returnCode = vrep.simxRemoveModel(self.clientID, self.robot_handle, vrep.simx_opmode_oneshot_wait)
            rospy.loginfo("Previous removal failed. Remove robot again, robot handler: %s | return code: %s", self.robot_handle, returnCode)
returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
#print("Loading robot, robot handler:",self.robot_handle," | return code:",returnCode)
while(returnCode != 0):
returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
            rospy.loginfo("Previous loading failed. Reload robot. robot handler: %s | return code: %s", self.robot_handle, returnCode)
def remove_all_turtlebot2i(self):
turtlebot2i_namelist = ['turtlebot2i', 'turtlebot2i#0', 'turtlebot2i#1', 'turtlebot2i#2', 'turtlebot2i#3', 'turtlebot2i#4', 'turtlebot2i#5', 'turtlebot2i#6', 'turtlebot2i#7', 'turtlebot2i#8', 'turtlebot2i#9',
'turtlebot_body_visual','turtlebot_reference','plate_middle_link_visual','plate_middle_link_respondable','GPS']
for turtlebot2i_name in turtlebot2i_namelist:
returnCode, temp_robot_handle = vrep.simxGetObjectHandle(self.clientID, turtlebot2i_name, vrep.simx_opmode_oneshot_wait)
returnCode = vrep.simxRemoveModel(self.clientID, temp_robot_handle, vrep.simx_opmode_oneshot_wait)
def check_robot_correctness(self):
returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
while(returnCode != 0):
            rospy.loginfo("The exact 'turtlebot2i' is not found! Try to delete all possible robots and then load again.")
self.remove_all_turtlebot2i()
returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
time.sleep(10)
returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
def changeScenario(self):
self.scenarioNr += 1
if self.scenarioNr >= 2:
self.scenarioNr = 0
if self.scenarioNr == 0:
self.setScenarioOriginal()
elif self.scenarioNr == 1:
self.setScenarioMove1()
def shutdown(self):
vrep.simxFinish(self.clientID)
class Env():
def __init__(self):
self.client = actionlib.SimpleActionClient('turtlebot2i/move_base', MoveBaseAction)
self.vrep_control = VrepManipulation()
self.goal = MoveBaseGoal()
self.goal.target_pose.pose.position.x = 5.0
self.goal.target_pose.pose.position.y = 5.0
self.target_list = [[-9.0, 6.5],
[-9.0, 3.0],
#[-6.5, 4.75],
[-4.0, 6.5],
[-4.0, 3.0],
[-0.5, 6.0],
[ 1.0, 3.0],
[ 5.0, 2.5],
[ 3.0, 0.0],
[-8.5, 0.0],
[-0.5,-2.0],
[ 9.0,-6.5],
[ 5.0,-2.0],
[-4.5,-2.5],
[ 8.5,-0.5],
[-9.0,-6.5]]
self.target_idx = random.randrange(0, len(self.target_list))
#self.heading = 0
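        # Each action is a [left_vel_scale, right_vel_scale] pair later published
        # via publishScaleSpeed(); the 16 entries cover every combination of the
        # scales {0.0, 0.4, 0.8, 1.2}, i.e. stop, straight motion at three speeds,
        # and turns of varying sharpness.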
self.action_list = [[0.0, 1.2], [0.0, 0.8], [0.0, 0.4], [0.4, 1.2], [0.4, 0.8], [0.8, 1.2], [0.0, 0.0], [0.4, 0.4], [0.8, 0.8], [1.2, 1.2], [1.2, 0.8], [0.8, 0.4], [1.2, 0.4], [0.4, 0.0], [0.8, 0.0], [1.2, 0.0]]
self.action_size = len(self.action_list)
#self.initGoal = True
self.get_goalbox = False
self.position = Pose()
self.prev_position = Pose()
self.orientation = 0.0
self.sub_pos = rospy.Subscriber('/turtlebot2i/sensors/global_pose', geometry_msgs.msg.PoseStamped, self.update_pose_callback)
#self.sub_risk = rospy.Subscriber('/turtlebot2i/safety/obstacles_risk', SafetyRisk, self.sceneGraphReconstruction) #instead of subscribing, wait this information in step function
self.sub_safetyzone = rospy.Subscriber('/turtlebot2i/safety/safety_zone', SafetyZone, self.safety_zone_callback)
self.sub_vel = rospy.Subscriber('/turtlebot2i/commands/velocity', Twist, self.speed_callback)
self.sub_bumper = rospy.Subscriber('/turtlebot2i/events/bumper', BumperEvent, self.bumper_callback)
self.pub_safe_vel = rospy.Publisher('/turtlebot2i/safety/vel_scale', VelocityScale, queue_size=10) #init publisher
#Additional
self.n_sensors = 675 #684 #if lidar on top: 684 data, if lidar in front of robot: 675 data
self.robot_linear_speed = 0.0
self.robot_angular_speed = 0.0
self.origin = Point((0.0, 0.0))
self.camera_near_clipping = 0.2 #0.01 #in meters
self.camera_far_clipping = 3.5 #in meters
self.camera_fov_angle = 57.0 #in degree
self.n_direction = 12
self.direction_list = np.linspace(-self.camera_fov_angle, self.camera_fov_angle, self.n_direction+1)
self.obstacle_map = []
self.obstacle_distances = np.ones((self.n_direction))*self.camera_far_clipping
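        # Pre-build one triangular sector per direction bin: each triangle spans two
        # adjacent FOV angles out to the far clipping distance, so detected obstacle
        # polygons can later be intersected with these sectors to get per-direction
        # distances.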
for i in range(self.n_direction):
self.obstacle_map.append(Polygon([[self.origin.x, self.origin.y],
[self.camera_far_clipping*cos(radians(self.direction_list[i+1])),self.camera_far_clipping*sin(radians(self.direction_list[i+1]))],
[self.camera_far_clipping*cos(radians(self.direction_list[i])), self.camera_far_clipping*sin(radians(self.direction_list[i]))]]))
self.r_critical = 0.205
self.r_warning = 0.31
self.r_clear = 0.32
self.collision = False
self.risk_max = 0.0
self.nearest_type = 0
self.min_distance = self.camera_far_clipping
self.nearest_direction = 0.0
self.nearest_speed = 0.0
self.speed_monitor = deque([])
def distance2D(self, pos1, pos2):
return sqrt((pos1.x - pos2.x)**2 + (pos1.y - pos2.y)**2)
def getGoalDistance(self):
#print("goal pos:",self.goal.target_pose.pose.position,"\n robot pos:",self.position)
return self.distance2D(self.goal.target_pose.pose.position, self.position)
def update_pose_callback(self, data):
self.prev_position = self.position
self.position = data.pose.position
(roll, pitch, self.orientation) = euler_from_quaternion([data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w])
def speed_callback(self, data):
#getting data from move base module
self.robot_linear_speed = data.linear.x
self.robot_angular_speed = data.angular.z
'''
if len(self.speed_monitor) > 100:
if sum(self.speed_monitor) < 0.1: #if robot gets stuck, cancel the goal
self.speed_monitor = deque([])
self.vrep_control.reset_robot_pos()
self.respawn_goal()
rospy.loginfo("Robot is stuck, changing goal position.")
else:
self.speed_monitor.popleft()
self.speed_monitor.append(data.linear.x+abs(data.angular.z))
'''
def safety_zone_callback(self, data):
self.r_critical = data.critical_zone_radius
self.r_warning = data.warning_zone_radius
self.r_clear = data.clear_zone_radius
def bumper_callback(self, data):
if data.state == 1: #collision occurs
self.collision = True
def rotated_pos(self, pointX, pointY, centerX, centerY,r00, r01, r10, r11):
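        # Rotate the point (pointX, pointY) about (centerX, centerY) with the 2x2
        # rotation matrix [[r00, r01], [r10, r11]], i.e. p' = R*(p - c) + c written
        # out component-wise below.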
point_X_rotated = r00*pointX + r01*pointY + centerX - r00*centerX - r01*centerY
point_Y_rotated = r10*pointX + r11*pointY + centerY - r10*centerX - r11*centerY
return [point_X_rotated, point_Y_rotated]
def sceneGraphReconstruction(self, data):
self.obstacle_distances = np.ones((self.n_direction))*self.camera_far_clipping
n_obstacle = len(data.type) #count the number of detected object
if n_obstacle > 0:
self.risk_max = max(data.risk_value)
else:
self.risk_max = 0.0
fig = plt.figure(1, figsize=(3.5,6), dpi=90)
ax = fig.add_subplot(111)
for i in range(n_obstacle):
#### reconstruct the obstacle from scene graph ####
obs_center_x = (data.distance[i])*cos(radians(data.direction[i]))
obs_center_y = (data.distance[i])*sin(radians(data.direction[i]))
r00 = np.cos((-self.orientation))
r01 = -np.sin((-self.orientation))
r10 = np.sin((-self.orientation))
r11 = np.cos((-self.orientation))
obstacle = Polygon([self.rotated_pos(obs_center_x-data.size_x[i]/2, obs_center_y-data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
self.rotated_pos(obs_center_x-data.size_x[i]/2, obs_center_y+data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
self.rotated_pos(obs_center_x+data.size_x[i]/2, obs_center_y+data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
self.rotated_pos(obs_center_x+data.size_x[i]/2, obs_center_y-data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11)])
curr_distance = self.origin.distance(obstacle) # need to be translated
#print("distance to origin:",curr_distance,data.distance[i])
obstacle = translate(obstacle, (data.distance[i]-curr_distance)*cos(radians(data.direction[i])), (data.distance[i]-curr_distance)*sin(radians(data.direction[i])))
curr_distance = self.origin.distance(obstacle) # need to be translated
#print("distance to origin2:",curr_distance,data.distance[i])
while(data.distance[i] - curr_distance) > 0.02: #translate again if the distance is not close to the real distance
obstacle = translate(obstacle, (data.distance[i]-curr_distance)*cos(radians(data.direction[i])), (data.distance[i]-curr_distance)*sin(radians(data.direction[i])))
curr_distance = self.origin.distance(obstacle)
#print("distance to origin3:",curr_distance,data.distance[i])
x,y = obstacle.exterior.xy
ax.plot(x, y)
for i in range(self.n_direction):
x,y = self.obstacle_map[i].exterior.xy
ax.plot(x, y)
if obstacle.intersects(self.obstacle_map[i]):
intersection_poylgon = obstacle.intersection(self.obstacle_map[i])
xC,yC= intersection_poylgon.exterior.xy
ax.plot(xC, yC)
self.obstacle_distances[i] = min(self.obstacle_distances[i], self.origin.distance(intersection_poylgon))
print("obstacle_distances: ")#, self.obstacle_distances)
for i in range(self.n_direction-1,-1,-1):
print("distance in zone["+str(self.n_direction-i-1)+"]: "+ str(self.obstacle_distances[i]))
#print("argmin_distance:",np.argmin(self.obstacle_distances))
plt.show()
return self.obstacle_distances
def getState(self, safety_risk_msg):
obstacle_distances = list(self.sceneGraphReconstruction(safety_risk_msg))
done = False
#min_range = 0.01
#if (min_range > self.min_distance) or self.collision:
if self.collision:
done = True
self.collision = False
if self.getGoalDistance() < 0.5:
self.get_goalbox = True
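        # State vector = 12 per-sector obstacle distances followed by
        # [linear speed, angular speed, max risk, warning radius, clear radius],
        # giving 17 values in total.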
return obstacle_distances + [self.robot_linear_speed, self.robot_angular_speed, self.risk_max, self.r_warning, self.r_clear], done
def publishScaleSpeed(self, left_vel_scale, right_vel_scale):
vel_scale_message = VelocityScale()
vel_scale_message.header = std_msgs.msg.Header()
vel_scale_message.header.stamp = rospy.Time.now()
vel_scale_message.left_vel_scale = left_vel_scale
vel_scale_message.right_vel_scale = right_vel_scale
self.pub_safe_vel.publish(vel_scale_message)
def respawn_goal(self, reset=False):
if reset:
self.vrep_control.changeScenario()
#self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
self.client.wait_for_server()
#create a move base goal message
self.goal.target_pose.header.frame_id = "map"
self.goal.target_pose.header.stamp = rospy.Time.now()
#choosing position randomly
next_target_idx = random.randrange(0, len(self.target_list))
while (self.target_idx == next_target_idx):
next_target_idx = random.randrange(0, len(self.target_list))
self.target_idx = next_target_idx
next_goal = self.target_list[self.target_idx]
self.goal.target_pose.pose.position.x = next_goal[0]
self.goal.target_pose.pose.position.y = next_goal[1]
self.goal.target_pose.pose.position.z = 0.063
#choosing orientation randomly
orientation=geometry_msgs.msg.Quaternion()
yaw = random.uniform(-pi, pi)#-90*pi/180 #unit: from deg. to rad.
orientation=quaternion_from_euler(0,0,yaw)#(roll, pitch,yaw) # return an array
self.goal.target_pose.pose.orientation.x=0.0
self.goal.target_pose.pose.orientation.y=0.0
self.goal.target_pose.pose.orientation.z=orientation[2]
self.goal.target_pose.pose.orientation.w=orientation[3]
self.client.send_goal(self.goal)
#rospy.loginfo("Goal position is sent! waiting the robot to finish....")
def setReward(self, state, done, action):
nearest_obstacle_distance = min(state[:12])
nearest_obstacle_direction = np.argmin(state[:12]) #index 0 start from right side of the robot
yaw_reward = 1.0
#travelled_distance = self.distance2D(self.prev_position, self.position)
#print("travelled_distance:",travelled_distance)
        if (nearest_obstacle_direction <= self.n_direction/3-1):  # obstacle is on the right
            if (action >= 10):  # robot turns right
                yaw_reward = -(action-9)/6.0
        elif (nearest_obstacle_direction >= self.n_direction*2/3):  # obstacle is on the left
            if (action <= 5):  # robot turns left
                yaw_reward = -(6-action)/6.0
        else:  # obstacle is in the front
            if (action in [6, 7, 8, 9]):
                yaw_reward = -(action-5)/4.0
distance_rate = 1.0 / max(nearest_obstacle_distance, 0.175)
if nearest_obstacle_distance < self.r_critical:
reward = (yaw_reward * distance_rate) -50
elif nearest_obstacle_distance < self.r_warning:
reward = (yaw_reward * distance_rate) -10
elif self.distance2D(self.prev_position, self.position) > 0.017:
if nearest_obstacle_distance < self.r_clear:
reward = (yaw_reward * distance_rate) + 1
else:
reward = 1
else:
reward = -1
#reward = (yaw_reward * distance_rate) + ob_reward
if done:
rospy.loginfo("Collision!!")
reward = -5000
self.publishScaleSpeed(0.0, 0.0)
if self.get_goalbox:
rospy.loginfo("Goal!!")
#reward = 500
self.publishScaleSpeed(0.0, 0.0)
self.respawn_goal(reset=True)
self.get_goalbox = False
return reward
def step(self, action):
self.publishScaleSpeed(self.action_list[action][0], self.action_list[action][1])
data = None
while data is None:
try:
data = rospy.wait_for_message('/turtlebot2i/safety/obstacles_risk', SafetyRisk, timeout=5)
except:
self.vrep_control.check_robot_correctness()
pass
state, done = self.getState(data)
reward = self.setReward(state, done, action)
return np.asarray(state), reward, done
def reset(self):
self.publishScaleSpeed(0,0)
self.vrep_control.reset_robot_pos()
self.respawn_goal(reset=True)
data = None
while data is None:
try:
data = rospy.wait_for_message('/turtlebot2i/safety/obstacles_risk', SafetyRisk, timeout=5)
except:
self.vrep_control.check_robot_correctness()
pass
state, done = self.getState(data)
return np.asarray(state)
| apache-2.0 |
gVallverdu/pymatgen | pymatgen/analysis/phase_diagram.py | 1 | 84336 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines tools to generate and analyze phase diagrams.
"""
import re
import collections
import itertools
import math
import logging
from monty.json import MSONable, MontyDecoder
from functools import lru_cache
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
from pymatgen.entries import Entry
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "May 16, 2011"
logger = logging.getLogger(__name__)
class PDEntry(Entry):
"""
An object encompassing all relevant data for phase diagrams.
.. attribute:: composition
The composition associated with the PDEntry.
.. attribute:: energy
The energy associated with the entry.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
.. attribute:: attribute
        An arbitrary attribute. Can be used to specify that the entry is a newly
found compound, or to specify a particular label for the entry, etc.
An attribute can be anything but must be MSONable.
"""
def __init__(self, composition: Composition, energy: float,
name: str = None, attribute: object = None):
"""
Args:
composition (Composition): Composition
energy (float): Energy for composition.
name (str): Optional parameter to name the entry. Defaults
to the reduced chemical formula.
attribute: Optional attribute of the entry. Must be MSONable.
"""
super().__init__(composition, energy)
self.name = name if name else self.composition.reduced_formula
self.attribute = attribute
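    # Minimal usage sketch (the energy below is a made-up illustrative number):
    #   entry = PDEntry(Composition("LiFeO2"), -7.5, name="LFO")
    #   entry.energy_per_atom  # -7.5 spread over the 4 atoms -> -1.875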
@property
def energy(self) -> float:
"""
:return: the energy of the entry.
"""
return self._energy
def __repr__(self):
return "PDEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def as_dict(self):
"""
:return: MSONable dict.
"""
return_dict = super().as_dict()
return_dict.update({"name": self.name,
"attribute": self.attribute})
return return_dict
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
def __hash__(self):
return id(self)
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PDEntry
"""
return cls(Composition(d["composition"]), d["energy"],
d["name"] if "name" in d else None,
d["attribute"] if "attribute" in d else None)
class GrandPotPDEntry(PDEntry):
"""
A grand potential pd entry object encompassing all relevant data for phase
diagrams. Chemical potentials are given as a element-chemical potential
dict.
"""
def __init__(self, entry, chempots, name=None):
"""
Args:
entry: A PDEntry-like object.
chempots: Chemical potential specification as {Element: float}.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula of the original entry.
"""
comp = entry.composition
self.original_entry = entry
self.original_comp = comp
grandpot = entry.energy - sum([comp[el] * pot
for el, pot in chempots.items()])
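        # Grand potential energy: subtract n_el * mu_el for every element whose
        # chemical potential is fixed; those open elements are then dropped from
        # the composition used in the grand-potential phase diagram.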
self.chempots = chempots
new_comp_map = {el: comp[el] for el in comp.elements
if el not in chempots}
super().__init__(new_comp_map, grandpot, entry.name)
self.name = name if name else entry.name
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.original_comp.is_element
def __repr__(self):
chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
for el, mu in self.chempots.items()])
return "GrandPotPDEntry with original composition " + \
"{}, energy = {:.4f}, {}".format(self.original_entry.composition,
self.original_entry.energy,
chempot_str)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
:return: MSONAble dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"chempots": {el.symbol: u for el, u in self.chempots.items()},
"name": self.name}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PDStructureEntry
"""
chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
entry = MontyDecoder().process_decoded(d["entry"])
return cls(entry, chempots, d["name"])
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
class TransformedPDEntry(PDEntry):
"""
    This class represents a TransformedPDEntry, which allows for a PDEntry to be
transformed to a different composition coordinate space. It is used in the
construction of phase diagrams that do not have elements as the terminal
compositions.
"""
def __init__(self, comp, original_entry):
"""
Args:
comp (Composition): Transformed composition as a Composition.
original_entry (PDEntry): Original entry that this entry arose from.
"""
super().__init__(comp, original_entry.energy)
self.original_entry = original_entry
self.name = original_entry.name
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
def __repr__(self):
output = ["TransformedPDEntry {}".format(self.composition),
" with original composition {}".format(self.original_entry.composition),
", E = {:.4f}".format(self.original_entry.energy)]
return "".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"composition": self.composition}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: TransformedPDEntry
"""
entry = MontyDecoder().process_decoded(d["entry"])
return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
..attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
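    # Illustrative usage sketch (made-up energies for a Li-O toy system):
    #   entries = [PDEntry(Composition("Li"), 0.0),
    #              PDEntry(Composition("O2"), 0.0),
    #              PDEntry(Composition("Li2O"), -6.0)]
    #   pd = PhaseDiagram(entries)
    #   pd.get_form_energy_per_atom(entries[2])   # formation energy vs. Li and O2
    #   pd.get_decomposition(Composition("Li3O")) # -> {Li entry: x, Li2O entry: y}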
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
                the entries themselves and are sorted alphabetically.
If specified, element ordering (e.g. for pd coordinates)
is preserved.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements))
elements = list(elements)
dim = len(elements)
entries = sorted(entries, key=lambda e: e.composition.reduced_composition)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=lambda e: e.composition.reduced_composition):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
            raise PhaseDiagramError(
                "There are no entries associated with a terminal element!")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
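        # The extra point lies above every real energy, so any facet that contains
        # it belongs to the upper hull; such facets are skipped below, leaving only
        # the lower convex hull, i.e. the thermodynamically stable facets.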
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
"""
:return: The actual ndarray used to construct the convex hull.
"""
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
            entry: A PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
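    # For example (continuing the sketch above), the two methods differ exactly
    # by the number of atoms in the entry's composition:
    #
    #   e_f = pd.get_form_energy(entry)              # eV (total)
    #   e_f_pa = pd.get_form_energy_per_atom(entry)  # eV/atom
    #   # e_f == e_f_pa * entry.composition.num_atoms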
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
"""
        :return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: PhaseDiagram
"""
entries = [MontyDecoder().process_decoded(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
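    # For example (a sketch with a hypothetical composition), the returned
    # amounts are barycentric coordinates on the hosting facet and sum to ~1:
    #
    #   decomp = pd.get_decomposition(Composition("Li3O2"))
    #   # -> {<stable entry>: amount, ...}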
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
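    # For example (a sketch):
    #
    #   e_hull = pd.get_e_above_hull(entry)
    #   # e_hull == 0 for stable entries, > 0 for unstable ones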
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
def get_composition_chempots(self, comp):
"""
Get the chemical potentials for all elements at a given composition.
:param comp: Composition
:return: Dict of chemical potentials.
"""
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_all_chempots(self, comp):
"""
Get chemical potentials at a given compositon.
:param comp: Composition
:return: Chemical potentials.
"""
        # Note: this mirrors the lookup in _get_facet_and_simplex, but collects
        # all facets containing the composition rather than just the first match.
c = self.pd_coords(comp)
allfacets = []
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
allfacets.append(f)
if not len(allfacets):
raise RuntimeError("No facets found for comp = {}".format(comp))
else:
chempots = {}
for facet in allfacets:
facet_elt_list = [self.qhull_entries[j].name for j in facet]
facet_name = '-'.join(facet_elt_list)
chempots[facet_name] = self._get_facet_chempots(facet)
return chempots
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
        valid = np.ones(len(proj), dtype=bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
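    # For example (a sketch for a hypothetical Li-O diagram), the endpoints are
    # always included and every returned composition lies on the tieline:
    #
    #   crits = pd.get_critical_compositions(Composition("Li2O"),
    #                                        Composition("LiO2"))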
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[ {'chempot': -10.487582010000001, 'evolution': -2.0,
            'reaction': Reaction Object}, ...]
"""
element = get_el_sp(element)
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
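    # For example (a sketch, assuming a phase diagram that contains Li and the
    # other elements in the chosen composition):
    #
    #   profile = pd.get_element_profile(Element("Li"), Composition("LiFePO4"))
    #   for step in profile:
    #       print(step["chempot"], step["evolution"], step["reaction"])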
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
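    # For example (a sketch for a ternary Li-Co-O diagram):
    #
    #   ranges = pd.get_chempot_range_map([Element("Li"), Element("O")])
    #   # maps each stable entry to the Simplex objects bounding its stability
    #   # region in (mu_Li, mu_O) space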
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
returns a set of chemical potentials corresponding to the vertices of
the simplex in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition
except dep_elt.
The chemical potential of dep_elt is computed from the target
composition energy.
This method is useful to get the limiting conditions for
defects computations for instance.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed
from the energy of
the stable phase at the target composition
            tol_en: Energy tolerance used to decide whether two computed
                vertices are duplicates.
Returns:
            [{Element: mu}]: A list of dicts of chemical potentials at the
                simplex vertices. Values are "absolute" (i.e., not referenced
                to the elemental energies).
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
        quite common to have, for instance, a ternary oxide (e.g., ABO3) for
        which you want to know the A and B chemical potentials leading
        to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != open_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
max_mus = v
if (np.dot(v + muref, coeff) + ef) / target_comp[open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / target_comp[open_elt]
min_mus = v
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
res[open_elt] = (min_open, max_open)
return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows
Grand potential = G - u_X N_X
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
                the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super().__init__(all_entries, elements)
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
"""
:return: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: GrandPotentialPhaseDiagram
"""
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
the energy above hulls will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
super().__init__(
pentries, elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
defined by the terminal compositions, they are excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
# Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
# We only allow reactions that have positive amounts of
# reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
# If the reaction can't be balanced, the entry does not fall
# into the phase space. We ignore them.
pass
return new_entries, sp_mapping
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminals}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict Representation
:return: CompoundPhaseDiagram
"""
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class ReactionDiagram:
"""
Analyzes the possible reactions between a pair of compounds, e.g.,
an electrolyte and an electrode.
"""
def __init__(self, entry1, entry2, all_entries, tol=1e-4,
float_fmt="%.4f"):
"""
Args:
entry1 (ComputedEntry): Entry for 1st component. Note that
corrections, if any, must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to experimental data (such
                as the EC molecule).
entry2 (ComputedEntry): Entry for 2nd component. Note that
corrections must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
                if a particular entry is fitted to experimental data (such
                as the EC molecule).
all_entries ([ComputedEntry]): All other entries to be
considered in the analysis. Note that corrections, if any,
must already be pre-applied.
tol (float): Tolerance to be used to determine validity of reaction.
float_fmt (str): Formatting string to be applied to all floats.
Determines number of decimal places in reaction string.
"""
elements = set()
for e in [entry1, entry2]:
elements.update([el.symbol for el in e.composition.elements])
elements = tuple(elements) # Fix elements to ensure order.
comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el)
for el in elements])
comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el)
for el in elements])
r1 = entry1.composition.reduced_composition
r2 = entry2.composition.reduced_composition
logger.debug("%d total entries." % len(all_entries))
pd = PhaseDiagram(all_entries + [entry1, entry2])
terminal_formulas = [entry1.composition.reduced_formula,
entry2.composition.reduced_formula]
logger.debug("%d stable entries" % len(pd.stable_entries))
logger.debug("%d facets" % len(pd.facets))
logger.debug("%d qhull_entries" % len(pd.qhull_entries))
rxn_entries = []
done = []
def fmt(fl):
return float_fmt % fl
for facet in pd.facets:
for face in itertools.combinations(facet, len(facet) - 1):
face_entries = [pd.qhull_entries[i] for i in face]
if any([e.composition.reduced_formula in terminal_formulas
for e in face_entries]):
continue
try:
m = []
for e in face_entries:
m.append([e.composition.get_atomic_fraction(el)
for el in elements])
m.append(comp_vec2 - comp_vec1)
m = np.array(m).T
coeffs = np.linalg.solve(m, comp_vec2)
x = coeffs[-1]
if all([c >= -tol for c in coeffs]) and \
(abs(sum(coeffs[:-1]) - 1) < tol) and \
(tol < x < 1 - tol):
c1 = x / r1.num_atoms
c2 = (1 - x) / r2.num_atoms
factor = 1 / (c1 + c2)
c1 *= factor
c2 *= factor
# Avoid duplicate reactions.
if any([np.allclose([c1, c2], cc) for cc in done]):
continue
done.append((c1, c2))
rxn_str = "%s %s + %s %s -> " % (
fmt(c1), r1.reduced_formula,
fmt(c2), r2.reduced_formula)
products = []
product_entries = []
energy = - (x * entry1.energy_per_atom +
(1 - x) * entry2.energy_per_atom)
for c, e in zip(coeffs[:-1], face_entries):
if c > tol:
r = e.composition.reduced_composition
products.append("%s %s" % (
fmt(c / r.num_atoms * factor),
r.reduced_formula))
product_entries.append((c, e))
energy += c * e.energy_per_atom
rxn_str += " + ".join(products)
comp = x * comp_vec1 + (1 - x) * comp_vec2
entry = PDEntry(
Composition(dict(zip(elements, comp))),
energy=energy, attribute=rxn_str)
entry.decomposition = product_entries
rxn_entries.append(entry)
except np.linalg.LinAlgError:
logger.debug("Reactants = %s" % (", ".join([
entry1.composition.reduced_formula,
entry2.composition.reduced_formula])))
logger.debug("Products = %s" % (
", ".join([e.composition.reduced_formula
for e in face_entries])))
rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)
self.entry1 = entry1
self.entry2 = entry2
self.rxn_entries = rxn_entries
self.labels = collections.OrderedDict()
for i, e in enumerate(rxn_entries):
self.labels[str(i + 1)] = e.attribute
e.name = str(i + 1)
self.all_entries = all_entries
self.pd = pd
def get_compound_pd(self):
"""
Get the CompoundPhaseDiagram object, which can then be used for
plotting.
Returns:
(CompoundPhaseDiagram)
"""
# For this plot, since the reactions are reported in formation
# energies, we need to set the energies of the terminal compositions
        # to 0. So we create copies with 0 energy.
entry1 = PDEntry(self.entry1.composition, 0)
entry2 = PDEntry(self.entry2.composition, 0)
cpd = CompoundPhaseDiagram(
self.rxn_entries + [entry1, entry2],
[Composition(entry1.composition.reduced_formula),
Composition(entry2.composition.reduced_formula)],
normalize_terminal_compositions=False)
return cpd
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
"""
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
class PDPlotter:
"""
A plotter class for phase diagrams.
"""
def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
r"""
Args:
phasediagram: PhaseDiagram object.
            show_unstable (float): Whether unstable phases will also be
                plotted (as red crosses). If a number > 0 is entered, all
                phases with ehull < show_unstable will be shown.
**plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
# note: palettable imports matplotlib
from palettable.colorbrewer.qualitative import Set1_3
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
colors = Set1_3.mpl_colors
self.plotkwargs = plotkwargs or {
"markerfacecolor": colors[2],
"markersize": 10,
"linewidth": 3
}
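    # A minimal plotting sketch (assuming a 2-4 component PhaseDiagram `pd`
    # built as above):
    #
    #   plotter = PDPlotter(pd, show_unstable=0.2)
    #   plotter.show()
    #   # or plotter.write_image(stream, image_format="svg") to write to a stream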
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False, plt=None):
"""
:param label_stable: Whether to label stable compounds.
:param label_unstable: Whether to label unstable compounds.
:param ordering: Ordering of vertices.
:param energy_colormap: Colormap for coloring energy.
:param process_attributes: Whether to process the attributes.
:param plt: Existing plt object if plotting multiple phase diagrams.
:return: matplotlib.pyplot.
"""
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap, plt=plt,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
"""
Draw the element profile plot for a composition varying different
chemical potential of an element.
        X value is the negative of the chemical potential referenced to the
        elemental chemical potential. For example, if you choose Element("Li"),
        X = -(µLi - µLi0), which corresponds to the voltage versus a metal anode.
        Y values represent the element uptake of this composition
        (unit: per atom). All reactions are printed to help you choose the
        profile steps you want to label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Default to None (not showing any
annotation for reaction products). For the profile steps you want
to show the labels, just add it to the show_label_index. The
profile step counts from zero. For example, you can set
show_label_index=[0, 2, 5] to label profile step 0,2,5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
x1, x2, y1 = None, None, None
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
'k', linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt
def show(self, *args, **kwargs):
r"""
Draws the phase diagram using Matplotlib and show it.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False, plt=None):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
        but since matplotlib is a fairly expensive library to load and not all
        machines have it installed, I have done it this way.
"""
if plt is None:
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The follow defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
Shows the plot using pylab. Usually I won"t do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", **kwargs):
r"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
            image_format:
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
            **kwargs: Pass through to the get_plot function.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range _map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
def get_chempot_range_map_plot(self, elements, referenced=True):
"""
Returns a plot of the chemical potential range _map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(12, 8)
chempot_ranges = self._pd.get_chempot_range_map(
elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and sum([comp.get_atomic_fraction(el) for el in elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
# The hull does not generate the missing horizontal and vertical lines.
# The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
def uniquelines(q):
"""
Given all the facets, convert it into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
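# For example (a quick sketch):
#
#   uniquelines([[1, 2, 3], [3, 6, 7]])
#   # -> {(1, 2), (1, 3), (2, 3), (3, 6), (3, 7), (6, 7)}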
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
        coord: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
        coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
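# For example (a quick sketch), triangular_coord maps the 2D simplex corners
# onto an equilateral triangle; the result is transposed, so points come back
# as columns:
#
#   triangular_coord([[0, 0], [1, 0], [0, 1]])
#   # -> columns (0, 0), (1, 0), (0.5, sqrt(3)/2)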
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (nameup not in ordering) or (nameright not in ordering) or (nameleft not in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
    cc = np.array([0.5, np.sqrt(3.0) / 6.0], float)
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/cluster/mean_shift_.py | 15 | 12344 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
from collections import defaultdict
import numpy as np
from ..externals import six
from ..utils import extmath, check_random_state, gen_batches
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
datasets, it's wise to set that parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
        Should be in [0, 1]; 0.5 means that the median of all pairwise
        distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
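# A minimal usage sketch (assuming scikit-learn's make_blobs for toy data):
#
#   from sklearn.datasets import make_blobs
#   X, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.6, random_state=0)
#   bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=200)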
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iterations=300):
"""Perform mean shift clustering of data using a flat kernel.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features]
Point used as initial kernel locations.
bin_seeding : boolean
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    max_iterations : int, default 300
        Maximum number of mean shift iterations performed per seed before
        the climb for that seed is stopped.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iterations
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
            # If converged or at max_iterations, add the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iterations):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
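# Continuing the sketch above:
#
#   cluster_centers, labels = mean_shift(X, bandwidth=bandwidth,
#                                        bin_seeding=True)
#   print(len(cluster_centers), "clusters found")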
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.cast[np.int32](point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
bin_seeds = bin_seeds * bin_size
return bin_seeds
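# Illustrative sketch (not part of the original module): with a grid size of
# 1.0 and min_bin_freq=2, only the bin holding the three clustered points
# below survives as a seed. The data values and the helper name are
# assumptions made for demonstration only.
def _example_get_bin_seeds():
    X = np.array([[1.1, 1.1], [1.2, 1.0], [1.0, 1.2],
                  [5.0, 5.0]])
    seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)
    print("bin seeds:\n%s" % seeds)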
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = np.asarray(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
return pairwise_distances_argmin(X, self.cluster_centers_)
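# Illustrative sketch (not part of the original module): typical estimator
# usage with fit and predict. The sample data, the bandwidth value and the
# helper name are assumptions made for this example.
def _example_mean_shift_estimator():
    rng = np.random.RandomState(42)
    X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + [10.0, 10.0]])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True)
    ms.fit(X)
    print("cluster centers:\n%s" % ms.cluster_centers_)
    print("predicted labels: %s" % ms.predict([[0.0, 0.0], [10.0, 10.0]]))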
| bsd-3-clause |
Jiankai-Sun/Jiankai-Sun.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
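# An illustrative example (not part of the original notebook): one record with
# the expected columns, using made-up placeholder values only.
example_publication = {
    "pub_date": "2009-10-01",
    "url_slug": "example-paper",
    "title": "An Example Paper Title",
    "venue": "Journal of Examples",
    "excerpt": "One-sentence summary of the paper.",
    "citation": "Your Name. (2009). An Example Paper Title. Journal of Examples.",
    "site_url": "http://academicpages.github.io",
    "paper_url": "http://academicpages.github.io/files/example.pdf",
}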
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
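# Illustrative usage (not part of the original notebook): escaping a string
# that contains characters YAML is picky about. The variable name is only for
# demonstration.
_escaped_demo = html_escape('Quoted "title" & more')
# _escaped_demo is now 'Quoted &quot;title&quot; &amp; more'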
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), just remove or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py | 1 | 3099 | """UnitDblConverter module containing class UnitDblConverter."""
import numpy as np
from matplotlib import cbook
import matplotlib.units as units
import matplotlib.projections.polar as polar
__all__ = ['UnitDblConverter']
# A special function for use with the matplotlib FuncFormatter class
# for formatting axes with radian units.
# This was copied from matplotlib example code.
def rad_fn(x, pos=None):
"""Radian function formatter."""
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return str(x)
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return fr'${n//2}\pi$'
else:
return fr'${n}\pi/2$'
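# Illustrative sketch (not part of the original module): rad_fn is intended to
# be wrapped in a FuncFormatter so tick labels on a radian-valued axis render
# as multiples of pi. The helper name and the assumption that ``ax`` is an
# existing Axes are ours, not the module's.
def _example_attach_radian_formatter(ax):
    from matplotlib.ticker import FuncFormatter
    ax.xaxis.set_major_formatter(FuncFormatter(rad_fn))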
class UnitDblConverter(units.ConversionInterface):
"""
Provides Matplotlib conversion functionality for the Monte UnitDbl class.
"""
# default for plotting
defaults = {
"distance": 'km',
"angle": 'deg',
"time": 'sec',
}
@staticmethod
def axisinfo(unit, axis):
# docstring inherited
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Check to see if the value used for units is a string unit value
# or an actual instance of a UnitDbl so that we can use the unit
# value for the default axis label value.
if unit:
label = unit if isinstance(unit, str) else unit.label()
else:
label = None
if label == "deg" and isinstance(axis.axes, polar.PolarAxes):
# If we want degrees for a polar plot, use the PolarPlotFormatter
majfmt = polar.PolarAxes.ThetaFormatter()
else:
majfmt = U.UnitDblFormatter(useOffset=False)
return units.AxisInfo(majfmt=majfmt, label=label)
@staticmethod
def convert(value, unit, axis):
# docstring inherited
if not cbook.is_scalar_or_string(value):
return [UnitDblConverter.convert(x, unit, axis) for x in value]
# If the incoming value behaves like a number,
# then just return it because we don't know how to convert it
# (or it is already converted)
if units.ConversionInterface.is_numlike(value):
return value
# If no units were specified, then get the default units to use.
if unit is None:
unit = UnitDblConverter.default_units(value, axis)
# Convert the incoming UnitDbl value/values to float/floats
if isinstance(axis.axes, polar.PolarAxes) and value.type() == "angle":
# Guarantee that units are radians for polar plots.
return value.convert("rad")
return value.convert(unit)
@staticmethod
def default_units(value, axis):
# docstring inherited
# Determine the default units based on the user preferences set for
# default units when printing a UnitDbl.
if cbook.is_scalar_or_string(value):
return UnitDblConverter.defaults[value.type()]
else:
return UnitDblConverter.default_units(value[0], axis)
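# Illustrative sketch (not part of the original module): a converter like this
# is used by registering it for the UnitDbl type in matplotlib's unit
# registry. The helper name is an assumption made for this example.
def _example_register_unitdbl_converter():
    import matplotlib.units as munits
    import matplotlib.testing.jpl_units as U
    munits.registry[U.UnitDbl] = UnitDblConverter()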
| gpl-2.0 |
giltis/xray-vision | xray_vision/mpl_plotting.py | 2 | 3226 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib.cm as mcm
import copy
def mark_region(ax, low, high, vline_style, span_style):
"""
Mark a region of a graph with vertical lines and an axvspan
Parameters
----------
ax : Axes
        The `Axes` object to add the artists to
    low, high : float
        The low and high threshold values; vertical lines are drawn at
        these positions and the region between them is shaded with an
        axvspan
    vline_style : dict, optional
        The style to use for the vertical lines
    span_style : dict, optional
Style for axvspan behind central region
Returns
-------
vline_low, vline_hi : Line2D
Vertical lines at the thresholds
hspan : Patch
Patch over middle region
"""
# add vertical lines
vline_low = ax.axvline(low, **vline_style)
vline_high = ax.axvline(high, **vline_style)
hspan = ax.axvspan(low, high, **span_style)
return vline_low, vline_high, hspan
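# Illustrative sketch (not part of the original module): highlight the band
# 2 < x < 5 on a simple sine curve. The style dictionaries and the helper name
# are arbitrary assumptions chosen for the demo.
def _example_mark_region():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 10, 200)
    ax.plot(x, np.sin(x))
    mark_region(ax, 2, 5,
                vline_style={'color': 'k', 'linestyle': '--'},
                span_style={'color': 'y', 'alpha': 0.3})
    return fig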
def split_plot(ax, x, y, low, high, inner_style, outer_style):
"""
Split styling of line based on the x-value
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
x, y : ndarray
Data, must be same length
low, high : float
The low and high threshold values, points for `low < x < high` are
styled using `inner_style` and points for `x < low or x > high` are
styled using `outer_style`
inner_style, outer_style : dict
Dictionary of styles that can be passed to `ax.plot`
Returns
-------
lower, mid, upper : Line2D
        The artists for the lower, middle, and upper ranges
"""
low_mask = x < low
high_mask = x > high
mid_mask = ~np.logical_or(low_mask, high_mask)
low_mask[1:] |= low_mask[:-1]
high_mask[:-1] |= high_mask[1:]
lower, = ax.plot(x[low_mask], y[low_mask], **outer_style)
mid, = ax.plot(x[mid_mask], y[mid_mask], **inner_style)
upper, = ax.plot(x[high_mask], y[high_mask], **outer_style)
return lower, mid, upper
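# Illustrative sketch (not part of the original module): style the portion of
# a curve with 2 < x < 8 differently from the tails. The style dictionaries
# and the helper name are arbitrary assumptions chosen for the demo.
def _example_split_plot():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 10, 200)
    split_plot(ax, x, np.sin(x), 2, 8,
               inner_style={'color': 'r', 'lw': 2},
               outer_style={'color': '0.5', 'lw': 1})
    return fig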
def show_label_array(ax, label_array, cmap=None, **kwargs):
"""
Display a labeled array nicely
Additional kwargs are passed through to `ax.imshow`.
If `vmin` is in kwargs, it is clipped to minimum of 0.5.
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
label_array : ndarray
Expected to be an unsigned integer array. 0 is background,
        positive integers label regions of interest
cmap : str or colormap, optional
Color map to use, defaults to 'Paired'
Returns
-------
img : AxesImage
The artist added to the axes
"""
if cmap is None:
cmap = 'Paired'
_cmap = copy.copy((mcm.get_cmap(cmap)))
_cmap.set_under('w', 0)
vmin = max(.5, kwargs.pop('vmin', .5))
ax.set_aspect('equal')
    # use the copied colormap with the 'under' color set so that the
    # background label (0) renders as white
    im = ax.imshow(label_array, cmap=_cmap,
interpolation='nearest',
vmin=vmin,
**kwargs)
return im
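# Illustrative sketch (not part of the original module): display a tiny label
# image with two rectangular regions of interest. The array values and the
# helper name are made up for the demo.
def _example_show_label_array():
    import matplotlib.pyplot as plt
    labels = np.zeros((10, 10), dtype=np.uint16)
    labels[2:5, 2:5] = 1
    labels[6:9, 6:9] = 2
    fig, ax = plt.subplots()
    show_label_array(ax, labels)
    return fig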
| bsd-3-clause |
datapythonista/pandas | pandas/tests/groupby/test_filters.py | 2 | 20777 | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
Timestamp,
)
import pandas._testing as tm
def test_filter_series():
s = Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
tm.assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index),
)
tm.assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index),
)
def test_filter_single_column_df():
df = DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd)
tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
tm.assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index),
)
tm.assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index),
)
def test_filter_multi_column_df():
df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2])
tm.assert_frame_equal(
grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected
)
def test_filter_mixed_df():
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2])
tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected)
def test_filter_out_all_groups():
s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups():
s = Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
tm.assert_series_equal(filtered, s)
df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()})
grouper = df["A"].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x["A"].mean() > 0)
tm.assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df():
# GH12768
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
res = df.groupby("a")
res = res.filter(lambda x: x["b"].sum() > 5, dropna=False)
expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3})
tm.assert_frame_equal(expected, res)
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]})
res = df.groupby("a")
res = res.filter(lambda x: x["b"].sum() > 5, dropna=True)
expected = DataFrame({"a": [], "b": []}, dtype="int64")
tm.assert_frame_equal(expected, res)
def test_filter_condition_raises():
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
grouped.filter(raise_if_sum_is_zero)
def test_filter_with_axis_in_groupby():
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64")
result = data.groupby(level=0, axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
tm.assert_frame_equal(result, expected)
def test_filter_bad_shapes():
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
s = df["B"]
g_df = df.groupby("B")
g_s = s.groupby(s)
f = lambda x: x
msg = "filter function returned a DataFrame, but expected a scalar bool"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
f = lambda x: x == 1
msg = "filter function returned a DataFrame, but expected a scalar bool"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
f = lambda x: np.outer(x, x)
msg = "can't multiply sequence by non-int of type 'str'"
with pytest.raises(TypeError, match=msg):
g_df.filter(f)
msg = "the filter must return a boolean result"
with pytest.raises(TypeError, match=msg):
g_s.filter(f)
def test_filter_nan_is_false():
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
s = df["B"]
g_df = df.groupby(df["B"])
g_s = s.groupby(s)
f = lambda x: np.nan
tm.assert_frame_equal(g_df.filter(f), df.loc[[]])
tm.assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround():
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype("bool")]
new_way = grouped.filter(f)
tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype("bool")]
new_way = grouped.filter(f)
tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame(
{
"ints": Series(np.random.randint(0, 100, N)),
"floats": N / 10 * Series(np.random.random(N)),
"letters": Series(random_letters),
}
)
# Group by ints; filter on floats.
grouped = df.groupby("ints")
old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")]
new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20)
tm.assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
tm.assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby("letters")
old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")]
new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20)
tm.assert_frame_equal(new_way, old_way)
def test_filter_using_len():
# BUG GH4447
df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)})
grouped = df.groupby("B")
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)},
index=np.arange(2, 6),
)
tm.assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
tm.assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df["B"]
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ["b"], index=np.arange(2, 6), name="B")
tm.assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
tm.assert_series_equal(actual, expected)
def test_filter_maintains_ordering():
# Simple case: index is sequential. #4621
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}
)
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df["pid"]
grouped = df.groupby("tag")
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
tm.assert_frame_equal(actual, expected)
grouped = s.groupby(df["tag"])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
tm.assert_series_equal(actual, expected)
def test_filter_multiple_timestamp():
# GH 10114
df = DataFrame(
{
"A": np.arange(5, dtype="int64"),
"B": ["foo", "bar", "foo", "bar", "bar"],
"C": Timestamp("20130101"),
}
)
grouped = df.groupby(["B", "C"])
result = grouped["A"].filter(lambda x: True)
tm.assert_series_equal(df["A"], result)
result = grouped["A"].transform(len)
expected = Series([2, 3, 2, 3, 3], name="A")
tm.assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
tm.assert_frame_equal(df, result)
result = grouped.transform("sum")
expected = DataFrame({"A": [2, 8, 2, 8, 8]})
tm.assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({"A": [2, 3, 2, 3, 3]})
tm.assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index():
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index():
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index():
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index():
# GH4620
t0 = Timestamp("2013-09-30 00:05:00")
t1 = Timestamp("2013-10-30 00:05:00")
t2 = Timestamp("2013-11-30 00:05:00")
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index():
# GH4620
index = list("bbbcbbab")
df = DataFrame(
{"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]},
index=index,
)
grouped_df = df.groupby("tag")
ser = df["pid"]
grouped_ser = ser.groupby(df["tag"])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
tm.assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
tm.assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
tm.assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name="pid")
# ^ made manually because this can get confusing!
tm.assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid")
tm.assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
tm.assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols():
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"])
g = df.groupby("A")
# previously didn't have access to col A #????
filt = g.filter(lambda x: x["A"].sum() == 2)
tm.assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness():
df = DataFrame(
[
["best", "a", "x"],
["worst", "b", "y"],
["best", "c", "x"],
["best", "d", "y"],
["worst", "d", "y"],
["worst", "d", "y"],
["best", "d", "z"],
],
columns=["a", "b", "c"],
)
with pytest.raises(TypeError, match="filter function returned a.*"):
df.groupby("c").filter(lambda g: g["a"] == "best")
def test_filter_non_bool_raises():
df = DataFrame(
[
["best", "a", 1],
["worst", "b", 1],
["best", "c", 1],
["best", "d", 1],
["worst", "d", 1],
["worst", "d", 1],
["best", "d", 1],
],
columns=["a", "b", "c"],
)
with pytest.raises(TypeError, match="filter function returned a.*"):
df.groupby("a").filter(lambda g: g.c.mean())
def test_filter_dropna_with_empty_groups():
# GH 10780
data = Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
groupped = data.groupby(level=0)
result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3))
tm.assert_series_equal(result_false, expected_false)
result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64)
tm.assert_series_equal(result_true, expected_true)
def test_filter_consistent_result_before_after_agg_func():
# GH 17091
df = DataFrame({"data": range(6), "key": list("ABCABC")})
grouper = df.groupby("key")
result = grouper.filter(lambda x: True)
expected = DataFrame({"data": range(6), "key": list("ABCABC")})
tm.assert_frame_equal(result, expected)
grouper.sum()
result = grouper.filter(lambda x: True)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
lail3344/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 24 | 2966 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
Aasmi/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
SanPen/GridCal | src/GridCal/Engine/Simulations/OPF/simple_dispatch_ts.py | 1 | 7892 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
"""
This file implements a DC-OPF for time series
That means that solves the OPF problem for a complete time series at once
"""
import numpy as np
from GridCal.Engine.basic_structures import MIPSolvers
from GridCal.Engine.Core.time_series_opf_data import OpfTimeCircuit
from GridCal.Engine.Simulations.OPF.opf_templates import OpfTimeSeries
class OpfSimpleTimeSeries(OpfTimeSeries):
def __init__(self, numerical_circuit: OpfTimeCircuit, start_idx, end_idx, solver: MIPSolvers = MIPSolvers.CBC,
batteries_energy_0=None, text_prog=None, prog_func=None):
"""
DC time series linear optimal power flow
:param numerical_circuit: NumericalCircuit instance
:param start_idx: start index of the time series
:param end_idx: end index of the time series
:param solver: MIP solver to use
:param batteries_energy_0: initial state of the batteries, if None the default values are taken
"""
OpfTimeSeries.__init__(self, numerical_circuit=numerical_circuit, start_idx=start_idx, end_idx=end_idx,
solver=solver)
self.text_prog = text_prog
self.prog_func = prog_func
# build the formulation
self.problem = None
def solve(self, msg=False):
"""
:param msg:
:return:
"""
nc = self.numerical_circuit
# general indices
n = nc.nbus
m = nc.nbr
ng = nc.ngen
nb = nc.nbatt
nl = nc.nload
nt = self.end_idx - self.start_idx
a = self.start_idx
b = self.end_idx
Sbase = nc.Sbase
# battery
# Capacity = nc.battery_Enom / Sbase
# minSoC = nc.battery_min_soc
# maxSoC = nc.battery_max_soc
# if batteries_energy_0 is None:
# SoC0 = nc.battery_soc_0
# else:
# SoC0 = (batteries_energy_0 / Sbase) / Capacity
# Pb_max = nc.battery_pmax / Sbase
# Pb_min = nc.battery_pmin / Sbase
# Efficiency = (nc.battery_discharge_efficiency + nc.battery_charge_efficiency) / 2.0
# cost_b = nc.battery_cost_profile[a:b, :].transpose()
# generator
Pg_max = nc.generator_pmax / Sbase
Pg_min = nc.generator_pmin / Sbase
P_profile = nc.generator_p[a:b, :] / Sbase
cost_g = nc.generator_cost[a:b, :]
enabled_for_dispatch = nc.generator_active
# load
Pl = np.zeros((nt, nl))
Pg = np.zeros((nt, ng))
Pb = np.zeros((nt, nb))
E = np.zeros((nt, nb))
theta = np.zeros((nt, n))
for i, t in enumerate(range(a, b)):
# generator share:
Pavail = (Pg_max * nc.generator_active[t, :])
Gshare = Pavail / Pavail.sum()
Pl[i] = (nc.load_active[t, :] * nc.load_s.real[t, :]) / Sbase
Pg[i] = Pl[i].sum() * Gshare
if self.text_prog is not None:
self.text_prog('Solving ' + str(nc.time_array[t]))
if self.prog_func is not None:
self.prog_func((i+1) / nt * 100.0)
# Assign variables to keep
# transpose them to be in the format of GridCal: time, device
self.theta = theta
self.Pg = Pg
self.Pb = Pb
self.Pl = Pl
self.E = E
self.load_shedding = np.zeros((nt, nl))
self.s_from = np.zeros((nt, m))
self.s_to = np.zeros((nt, m))
self.overloads = np.zeros((nt, m))
self.rating = nc.branch_rates[a:b, :] / Sbase
self.nodal_restrictions = np.zeros((nt, n))
def get_voltage(self):
"""
return the complex voltages (time, device)
:return: 2D array
"""
return np.ones_like(self.theta) * np.exp(-1j * self.theta)
def get_overloads(self):
"""
return the branch overloads (time, device)
:return: 2D array
"""
return self.overloads
def get_loading(self):
"""
return the branch loading (time, device)
:return: 2D array
"""
return self.s_from / self.rating
def get_branch_power(self):
"""
return the branch loading (time, device)
:return: 2D array
"""
return self.s_from * self.numerical_circuit.Sbase
def get_battery_power(self):
"""
return the battery dispatch (time, device)
:return: 2D array
"""
return self.Pb * self.numerical_circuit.Sbase
def get_battery_energy(self):
"""
return the battery energy (time, device)
:return: 2D array
"""
return self.E * self.numerical_circuit.Sbase
def get_generator_power(self):
"""
return the generator dispatch (time, device)
:return: 2D array
"""
return self.Pg * self.numerical_circuit.Sbase
def get_load_shedding(self):
"""
return the load shedding (time, device)
:return: 2D array
"""
return self.load_shedding * self.numerical_circuit.Sbase
def get_load_power(self):
"""
return the load shedding (time, device)
:return: 2D array
"""
return self.Pl * self.numerical_circuit.Sbase
def get_shadow_prices(self):
"""
        Extract values from the 2D array of LP variables
:return: 2D numpy array
"""
return self.nodal_restrictions
if __name__ == '__main__':
from GridCal.Engine import *
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/grid_2_islands.xlsx'
main_circuit = FileOpen(fname).open()
# get the power flow options from the GUI
solver = SolverType.Simple_OPF
mip_solver = MIPSolvers.CBC
grouping = TimeGrouping.Daily
pf_options = PowerFlowOptions()
options = OptimalPowerFlowOptions(solver=solver,
grouping=grouping,
mip_solver=mip_solver,
power_flow_options=pf_options)
start = 0
end = len(main_circuit.time_profile)
# create the OPF time series instance
# if non_sequential:
optimal_power_flow_time_series = OptimalPowerFlowTimeSeries(grid=main_circuit,
options=options,
start_=start,
end_=end)
optimal_power_flow_time_series.run()
v = optimal_power_flow_time_series.results.voltage
print('Angles\n', np.angle(v))
l = optimal_power_flow_time_series.results.loading
print('Branch loading\n', l)
g = optimal_power_flow_time_series.results.generator_power
print('Gen power\n', g)
pr = optimal_power_flow_time_series.results.shadow_prices
print('Nodal prices \n', pr)
import pandas as pd
pd.DataFrame(optimal_power_flow_time_series.results.loading).to_excel('opf_loading.xlsx') | gpl-3.0 |
kaichogami/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
cogeorg/black_rhino | examples/firesales_SA/firesale-leverage.py | 2 | 3227 | #!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:300]
# -*- coding: utf-8 -*-
"""
This is a minimal example.
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2012 Co-Pierre Georg ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
The development of this software has been supported by the ERA-Net
on Complexity through the grant RESINEE.
"""
# -------------------------------------------------------------------------
#
# MAIN
#
# -------------------------------------------------------------------------
if __name__ == '__main__':
from src.environment import Environment
from src.runner import Runner
import logging
import pandas as pd
import numpy as np
import random
from src.frange import frange
import matplotlib.pyplot as plt
import sys
import decimal
# INITIALIZATION
#
environment_directory = str("configs/environment/")
identifier = str("firesales")
############
environment = Environment(environment_directory, identifier)
runner = Runner(environment)
############
#Declare variables of interest for the simulation
    print("the illiquidity parameter is"), environment.static_parameters['illiquidity'] #is declared inside ENV config
    print("the num_simulations parameter is"), environment.static_parameters['num_simulations'] # is declared inside ENV config
    environment.static_parameters['leverage_increase'] = (1.0 / 3.0)  # float division; a plain 1/3 evaluates to 0 under Python 2
print environment.static_parameters['leverage_increase']
for i in range(int(environment.static_parameters['num_simulations'])):
if i == 0:
print("**********START simulation %s") % (i+1)
environment.initialize(environment_directory, identifier)
#specify shock for asset class
environment.shocks[0].asset_returns['m_14'] = -0.2
#print environment.shocks[0].asset_returns['m_14']
# print environment.shocks[0].asset_returns
runner.initialize(environment)
# # do the run
runner.do_run(environment, )
df1 = runner.updater.env_var_par_df
print(' Run DONE')
            print("***\nSimulation number %s had a total number of %s sweeps" % (i, str(runner.num_sweeps)))
            print("***\nThis run had the illiquidity coefficient %s" % environment.static_parameters['illiquidity'])
df_all = pd.concat([df1], keys=[ '-0.2'], ignore_index = False).to_csv("output/leverage_" + str(environment.static_parameters['illiquidity']) +"_"+ str(environment.num_agents) + "_" + str(environment.num_sweeps) + ".csv")
if i==1:
pass | gpl-3.0 |
ChanChiChoi/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal to the query vector and hence at a distance
# of exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
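# Illustrative addition, not part of the original test suite: a standalone
# numeric check of the cosine distances discussed in the comments above.
# The helper name is made up for this sketch.
def _cosine_distance_sanity_sketch():
    import numpy as np
    query = np.array([1., 0.])
    points = np.array([[0.999, 0.001], [0.5, 0.5], [0., 1.], [-1., 0.001]])
    # cosine distance = 1 - cos(angle) = 1 - <q, p> / (|q| |p|)
    cosine = points.dot(query) / (np.linalg.norm(points, axis=1) *
                                  np.linalg.norm(query))
    dists = 1. - cosine
    # almost aligned -> ~0, 45 degrees -> 1 - cos(pi/4) ~ 0.293,
    # orthogonal -> 1, almost opposite -> ~2
    return dists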
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/methods/test_count.py | 2 | 1068 | from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFrameCount:
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=range(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=range(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
| bsd-3-clause |
Lawrence-Liu/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
AndKe/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 32 | 14968 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
# Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
# correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
# Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_TYPE to 10 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
# 8) Run the analysis with 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
# taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
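# Illustrative addition (not part of the original ArduPilot test): a minimal
# sketch of the scale-factor update computed in run() above. The numeric
# values are made up; only the formula mirrors the code.
def _example_scale_factor_update():
    fitted_slope = 0.92       # slope of the flow-rate vs gyro-rate linear fit
    flow_fxscaler_old = 0     # currently configured FLOW_FXSCALER
    flow_fxscaler_new = int(1000 * ((1 + 0.001 * float(flow_fxscaler_old))
                                    / fitted_slope - 1))
    # applying the new scaler should bring the corrected slope close to 1
    corrected_slope = fitted_slope * (1 + 0.001 * flow_fxscaler_new) \
        / (1 + 0.001 * flow_fxscaler_old)
    return flow_fxscaler_new, corrected_slope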
| gpl-3.0 |
miic-sw/miic | miic.core/src/miic/core/change_processing.py | 1 | 41161 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 8 15:11:53 2016
@author: chris
"""
import numpy as np
from copy import deepcopy
from datetime import datetime
from scipy.optimize import fmin
import pdb
from miic.core.miic_utils import serial_date_from_datetime, convert_time
from miic.core.plot_fun import plot_dv
from obspy.signal.util import next_pow_2
def time_select(dv_dict,starttime=None,endtime=None):
""" Select time period from change data
"""
dvc = deepcopy(dv_dict)
time = convert_time(dvc['time'])
# convert starttime and endtime input.
# if they are None take the first or last values of the time vector
if starttime == None:
starttime = time[0]
else:
if not isinstance(starttime, datetime):
starttime = convert_time([starttime])[0]
if endtime == None:
endtime = time[-1]
else:
if not isinstance(endtime, datetime):
endtime = convert_time([endtime])[0]
# select period
ind = np.nonzero((time >= starttime) * (time < endtime))[0] # ind is
# a list(tuple) for dimensions
dvc['value'] = dv_dict['value'][ind]
dvc['time'] = dv_dict['time'][ind]
return dvc
def estimate_trend(dv_dict):
"""Estimates a linear trend in the change measurements contained in
dv_dict.
"""
# create time vector
xi = []
for t in convert_time(dv_dict['time']):
xi.append(serial_date_from_datetime(t))
xi = np.array(xi)
# create matrix for inversion
A = np.array([xi, np.ones(len(xi))])
# inversion
w = np.linalg.lstsq(A.T,-dv_dict['value'])[0] # obtaining the parameters
y = w[0]*xi + w[1]
std = np.std(y-(-dv_dict['value']))
w = np.append(w, std)
return w
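# Illustrative sketch (added, not in the original module): the same
# least-squares trend estimation as in `estimate_trend`, applied to a
# synthetic series instead of a dv dictionary. Names and numbers are made up.
def _example_trend_fit():
    xi = np.arange(100, dtype=float)                 # time axis, e.g. in days
    values = 0.02 * xi + 0.5 + 0.1 * np.random.RandomState(0).randn(100)
    A = np.array([xi, np.ones(len(xi))])
    w = np.linalg.lstsq(A.T, -values)[0]             # slope and intercept of -values
    y = w[0] * xi + w[1]
    std = np.std(y - (-values))
    return np.append(w, std)                         # [slope, intercept, std]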
def dt_baseline(dt_dict):
"""Find best baseline of time shift measurement
In a time shift measurement on a set of noise correlation functions the
baseline is undefined as the reference is arbitrary. However, if two
stations recieve GPS signal the time difference will be constant. This
means that shifts resulting from wrong timing will be random and those from
correct times are constant. Here we estimate the most common time shift and
assume that it characterises periods with correctly working clocks.
"""
ind = ~np.isnan(dt_dict['value'])
hh = np.histogram(dt_dict['value'][ind],bins=np.squeeze(dt_dict['second_axis']))
dt_bl = hh[1][np.argmax(hh[0])]
return dt_bl
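# Illustrative sketch (added): the histogram-mode idea behind `dt_baseline`,
# applied to synthetic shift values. The bin vector stands in for
# dt_dict['second_axis'].
def _example_dt_baseline():
    rng = np.random.RandomState(1)
    shifts = np.concatenate((0.2 * np.ones(50), rng.rand(20)))
    bins = np.linspace(0., 1., 21)
    hh = np.histogram(shifts, bins=bins)
    # left edge of the most populated bin ~ most common (baseline) shift
    return hh[1][np.argmax(hh[0])]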
def dv_combine(dv_list, method='average_sim_mat'):
"""Combine a set of change measuements
A list of dv dictionaries from e.g. different channels of the same station
combination is combined into a single dv dictionary.
If method is 'average_sim_mat' the similarity matrices are averaged. The
value along the crest of the averaged matrix is used as the new value.
"""
assert type(dv_list) == type([]), "dv_list is not a list"
if method == 'average_sim_mat':
res_dv = deepcopy(dv_list[0])
for dv in dv_list[1:]:
res_dv['sim_mat'] += dv['sim_mat']
res_dv['sim_mat'] /= len(dv_list)
res_dv['value'] = res_dv['second_axis'][np.argmax(res_dv['sim_mat'],axis=1)]
res_dv['corr'] = np.max(res_dv['sim_mat'],axis=1)
return res_dv
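# Illustrative sketch (added): averaging two similarity matrices and reading
# the value along the crest of the average, as `dv_combine` does for the
# 'average_sim_mat' method. The arrays are toy data.
def _example_average_sim_mat():
    second_axis = np.linspace(-0.01, 0.01, 5)
    sim1 = np.array([[0.1, 0.2, 0.9, 0.2, 0.1],
                     [0.1, 0.8, 0.3, 0.2, 0.1]])
    sim2 = np.array([[0.2, 0.3, 0.7, 0.3, 0.2],
                     [0.2, 0.6, 0.4, 0.3, 0.2]])
    avg = (sim1 + sim2) / 2.
    value = second_axis[np.argmax(avg, axis=1)]      # crest of the average
    corr = np.max(avg, axis=1)
    return value, corr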
def dv_combine_multi_ref(dv_list,max_shift=0.01,method='shift'):
"""Combine a list of change measuments with different reference traces
Combine a list of change dictionaries obtained from different references
into a single one. The similarity matrices are simply averaged after they
have been shifted to account for the offset between the different
references. The offset between the references is estimated as the shift
between the two correlation matrices with two possible methods: maximum of
the summed product of shifted similarity matrices (`shift`) or the median
of the difference between the estimated changes (`diff`). The first
measurement in the list is not shifted. If the input list of measurements
is longer than two, the individual shifts (with respect to the unshifted
first measurement) are estimated from a least-squares inversion of the shifts
between all measurements.
:type dv_list: list of dict
:param dv_list: list of velocity change dictionaries to be combined
:type max_shift: float
:param max_shift: maximum shift to be tested between different measurements
:type method: str
:param method: method to estimate the offset between two measurements
:rtype: dict
:return: combined dv_dict
"""
assert type(dv_list) == type([]), "dv_list is not a list"
assert method in ['shift','diff'], "method has to be either 'shift' or "\
"'diff'."
# steps should be at most as large as the largest second-axis step smaller than max_shift
steps = max_shift/(dv_list[0]['second_axis'][1]-dv_list[0]['second_axis'][0])
steps = int(np.floor(steps))
shift = []
G = np.zeros(((len(dv_list)**2-len(dv_list))/2,len(dv_list)-1),dtype=float)
cnt = 0
for ind1,dv1 in enumerate(dv_list):
for ind2,dv2 in enumerate(dv_list):
if ind2 > ind1:
shift.append(_dv_shift(dv1,dv2,steps,method))
if ind1 > 0: # assume that the first reference is not shifted
G[cnt,ind1-1] = 1
G[cnt,ind2-1] = -1
cnt += 1
offset = np.linalg.lstsq(G,shift)[0]
offset = np.concatenate(([0],(np.round(offset)).astype(int)))
cdv = deepcopy(dv_list[0])
ns = int(len(cdv['second_axis']))
for ind in range(1,len(dv_list)):
cdv['sim_mat'][:,np.max([0,offset[ind]]):np.min([ns,ns+offset[ind]])] += \
dv_list[ind]['sim_mat'][:,-np.min([0.,offset[ind]]):np.min([ns,ns-offset[ind]])]
cdv['sim_mat'] /= len(dv_list)
cdv['value'] = np.argmax(cdv['sim_mat'],axis=1)
cdv['corr'] = np.max(cdv['sim_mat'],axis=1)
cdv['value']= cdv['second_axis'][cdv['value']]
return cdv
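# Illustrative sketch (added): the least-squares inversion of pairwise shifts
# for per-reference offsets used in `dv_combine_multi_ref`, written out for
# three measurements with the first one fixed at zero offset. Toy numbers.
def _example_pairwise_shift_inversion():
    true_offset = np.array([3., -2.])      # offsets of measurements 2 and 3
    # pairwise shifts with the convention shift(i, j) = offset_i - offset_j
    shift = np.array([0. - true_offset[0],                  # pair (1, 2)
                      0. - true_offset[1],                  # pair (1, 3)
                      true_offset[0] - true_offset[1]])     # pair (2, 3)
    G = np.array([[-1., 0.],
                  [0., -1.],
                  [1., -1.]])
    est = np.linalg.lstsq(G, shift)[0]
    return est                              # ~ true_offset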
def _dv_shift(dv1,dv2,steps,method):
if method == 'shift':
c = []
shi_range = np.arange(-steps,steps)
for shi in shi_range:
c.append(np.nansum(dv2['sim_mat'][:,steps+shi:-steps+shi]*dv1['sim_mat'][:,steps:-steps]))
shift = shi_range[np.argmax(c)]
elif method == 'diff':
shift_val = np.median(dv2['value'] - dv1['value'])
shiftn = np.argmin(np.abs(shift_val-dv1['second_axis']))
shiftz = np.argmin(np.abs(-dv1['second_axis']))
shift = shiftn - shiftz
return shift
def model_dv(dv,model_type,param=()):
"""Model velocity change measurements
:type model: str
:param model: type of model to be fitted to the data
:type param: dict
:param param: dictionary with parameters for the model to be fitted
:rtype: dict
:return: dictionary with model parameters and modelled velocity changes
"""
if model_type == "seasonal":
model_res = _model_seasonal(dv,param)
elif model_type == "trend":
model_res = _model_trend(dv,param)
elif model_type == "const_exp_recovery":
model_res = _model_const_exp_recovery(dv,param)
elif model_type == 'trend_const_exp_recovery':
model_res = _model_trend_const_exp_recovery(dv,param)
model = {'type':model_type,'model_res':model_res,'param':param}
model_dat = model_dv_forward(dv,model_type,model_res,param)
model.update({'model_dat':model_dat})
return model
def model_dv_forward(dv,model_type,model_res,param=()):
if model_type == "seasonal":
model = _model_seasonal_forward(dv,model_res)
elif model_type == "trend":
model = _model_trend_forward(dv,model_res)
elif model_type == "const_exp_recovery":
model = _model_const_exp_recovery_forward(dv,model_res,param[0])
elif model_type == 'trend_const_exp_recovery':
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
model = _model_trend_const_exp_recovery_forward(dv,param.append(xdata))
return model
#def _model_trend_const_exp_recovery_forward(dv,param)
def _model_seasonal_forward(dv,x):
pfac = 1.991021277657232e-07 # 2*pi/length_of_year_in_second
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
model = x[0] * np.cos(xdata*pfac) + x[1] * np.sin(xdata*pfac) + x[2]
# construct a dv dictionary to return the model
ret_model = deepcopy(dv)
ret_model['value'] = model
# find correlation values along model
tmp = np.tile(dv['second_axis'],(dv['sim_mat'].shape[0],1))
tmp -= np.tile(np.atleast_2d(model).T,(1,dv['sim_mat'].shape[1]))
ind = np.argmin(np.abs(tmp),axis=1)
ret_model['corr'] = [dv['sim_mat'][ii,ind[ii]] \
for ii in range(dv['sim_mat'].shape[0])]
return ret_model
def _model_seasonal(dv,param):
"""Model a seasonal cycle
"""
if not param:
param = (0., 0., 0.)
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
model_res = fmin(_sine_fit, [param[0], param[1], param[2]], \
args=(xdata,dv), xtol=0.00001)
return model_res
def _sine_fit(x,xdata,dv):
pfac = 1.991021277657232e-07 # 2*pi/length_of_year_in_second
model = x[0] * np.cos(xdata*pfac) + x[1] * np.sin(xdata*pfac) + x[2]
scor = _calc_misfit(dv,model)
return np.nansum(np.max(dv['sim_mat'],axis=1)) - scor
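# Illustrative sketch (added): fitting the same annual cosine/sine model to a
# synthetic series with a plain least-squares misfit instead of the
# similarity-matrix misfit used above. pfac is 2*pi / seconds-per-year.
def _example_seasonal_fit():
    pfac = 1.991021277657232e-07
    tsec = np.arange(0, 3 * 31536000, 86400, dtype=float)   # 3 years, daily
    data = 0.004 * np.cos(tsec * pfac) - 0.002 * np.sin(tsec * pfac) + 0.001
    def l2_misfit(x):
        model = x[0] * np.cos(tsec * pfac) + x[1] * np.sin(tsec * pfac) + x[2]
        return np.sum((model - data) ** 2)
    return fmin(l2_misfit, [0., 0., 0.], xtol=1e-8, disp=False)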
def _model_trend(dv,param):
"""Model a linead trend
"""
if not param:
print "param not given"
param = (0., 0.,1)
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
#model_res = minimize(_trend_fit,(param[0], param[1]),args=(xdata,dv),tol=1e-20,bounds=((-0.03,0.03),(-2,2)))
#model_res = fmin(_trend_fit, [param[0], param[1]], \
# args=(xdata,dv),xtol=1e-14)
import pdb
pdb.set_trace()
# alternative grid search
c = dv['sim_mat'].ravel()
lsa = len(dv['second_axis'])
lt = len(dv['time'])
inc = param[2]
slval = np.arange(-lsa+1,lsa-1)
csum = np.zeros((len(slval),lsa))
x = np.zeros((2))
for sl in slval[::inc]:
ind = np.round(np.linspace(0,(lt-1)*lsa+sl,lt)).astype(int)
for st in (range(np.max((-sl,0)),np.min((lsa,lsa-sl))))[::inc]:
csum[int(sl+lsa-1),int(st)] = np.nansum(c[ind+st])
st = np.argmax(np.max(csum,axis=0))
sl = slval[np.argmax(csum[:,st])]
t = convert_time(dv['time'])
x[0] = dv['second_axis'][st]
x[1] = sl * (dv['second_axis'][-1] -dv['second_axis'][0])/(lsa-1) /\
(t[-1]-t[0]).days
model_res = x
return model_res
def _trend_fit(x,xdata,dv):
model = x[0] + 1e-10*x[1]*(xdata-xdata[0])
scor = _calc_misfit(dv,model)
return np.nansum(np.max(dv['sim_mat'],axis=1)) - scor
def _model_trend_forward(dv,x):
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
model = x[0] + 1e-10*x[1]*(xdata-xdata[0])
# construct a dv dictionary to return the model
ret_model = deepcopy(dv)
ret_model['value'] = model
# find correlation values along model
tmp = np.tile(dv['second_axis'],(dv['sim_mat'].shape[0],1))
tmp -= np.tile(np.atleast_2d(model).T,(1,dv['sim_mat'].shape[1]))
ind = np.argmin(np.abs(tmp),axis=1)
ret_model['corr'] = [dv['sim_mat'][ii,ind[ii]] \
for ii in range(dv['sim_mat'].shape[0])]
return ret_model
def _model_const_exp_recovery(dv,param):
"""Model with exponentially recovering perturbations
:type param[0]: obspy.stream
:param param[0]: timeseries with excitation
:type param[1]: float
:param param[1]: amplitude scaling of the excitation
:type param[2]: float
:param param[2]: time constant of recovery in seconds
"""
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
exc = param[0]
#model_res = minimize(_const_exp_recovery_fit, [1, 1], \
# args=(xdata,dv,exc,param[1],param[2]),bounds=())
model_res = fmin(_const_exp_recovery_fit, [1,1], \
args=(xdata,dv,exc,param[1], param[2]),xtol=0.001,maxiter=100)
model_res[0] *= param[1]
model_res[1] *= param[2]
return model_res
def _const_exp_recovery_fit(x,xdata,dv,exc,amp_scale,time_scale):
print x
a = x[0] * amp_scale
tau = x[1] * time_scale
t = exc[0].stats['starttime']
st = float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)
dt = exc[0].stats['delta']
tv = [st+dt*ind for ind in range(exc[0].stats['npts'])]
tv = np.array(tv)
rec = a * np.exp(-(tv-tv[0])/tau)
model = np.convolve(exc[0].data,rec,"full")
model = model[:len(exc[0].data)]
model = np.interp(xdata,tv,model)
scor = _calc_misfit(dv,model)
return np.nansum(np.max(dv['sim_mat'],axis=1)) - scor
def _model_const_exp_recovery_forward(dv,x,exc):
xdata = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(dv['time'])]
xdata = np.array(xdata)
t = exc[0].stats['starttime']
st = float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)
dt = exc[0].stats['delta']
tv = np.arange(st,st+dt*exc[0].stats['npts'],dt)
tv = [st+dt*ind for ind in range(exc[0].stats['npts'])]
tv = np.array(tv)
rec = x[0] * np.exp(-(tv-tv[0])/x[1])
model = np.convolve(exc[0].data,rec,"full")
model = model[:len(exc[0].data)]
model = np.interp(xdata,tv,model)
# construct a dv dictionary to return the model
ret_model = deepcopy(dv)
ret_model['value'] = model
# find correlation values along model
tmp = np.tile(dv['second_axis'],(dv['sim_mat'].shape[0],1))
tmp -= np.tile(np.atleast_2d(model).T,(1,dv['sim_mat'].shape[1]))
ind = np.argmin(np.abs(tmp),axis=1)
ret_model['corr'] = [dv['sim_mat'][ii,ind[ii]] \
for ii in range(dv['sim_mat'].shape[0])]
return ret_model
def _calc_misfit(dv,model):
"""Calculate the sum of the correlation values along the model curve
"""
tmp = np.tile(dv['second_axis'],(dv['sim_mat'].shape[0],1))
tmp -= np.tile(np.atleast_2d(model).T,(1,dv['sim_mat'].shape[1]))
ind = np.argmin(np.abs(tmp),axis=1)
scor = np.nansum([dv['sim_mat'][ii,ind[ii]] \
for ii in range(dv['sim_mat'].shape[0])])
return scor
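# Illustrative sketch (added): extracting the correlation values along a model
# curve from a similarity matrix, which is what `_calc_misfit` sums up.
# Toy arrays only.
def _example_crest_correlation():
    second_axis = np.linspace(-0.01, 0.01, 5)
    sim_mat = np.array([[0.1, 0.2, 0.9, 0.2, 0.1],
                        [0.1, 0.8, 0.3, 0.2, 0.1],
                        [0.2, 0.2, 0.3, 0.7, 0.1]])
    model = np.array([0.0, -0.005, 0.005])       # one model value per time step
    # nearest column of the similarity matrix for each model value
    ind = np.argmin(np.abs(second_axis[np.newaxis, :] -
                           model[:, np.newaxis]), axis=1)
    corr = sim_mat[np.arange(len(model)), ind]
    return np.nansum(corr)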
def _modelvec_from_dict(model_par):
""" create a vector of parameters to be inverted from the model dictionary
"""
x0 = []
for model_type in sorted(model_par['type']):
for param in sorted(model_par[model_type]):
if param in model_par[model_type]['to_vari']:
x0.append(model_par[model_type][param])
return x0
def _modeldict_from_vec(start_model,x):
model_par = deepcopy(start_model)
xcnt = 0
for model_type in sorted(start_model['type']):
for param in sorted(start_model[model_type]['to_vari']):
model_par[model_type][param] = x[xcnt]
xcnt += 1
return model_par
def _misfit_int_corr(x, dv, dv_time, exc_time, exc, start_model):
"""Calculate misfit between modeled velocity and measured streatching.
Stretching is -1 * velocity change
"""
# construct model_par
model_par = _modeldict_from_vec(start_model,x)
model = _forward(dv_time, exc_time, exc, model_par)
"""
scale = dv['second_axis'][1] - dv['second_axis'][0]
offset = dv['second_axis'][0]
mod_ind = (np.round((model-offset) / scale).astype(int))
ser_ind = mod_ind + np.arange(0,dv['sim_mat'].shape[0]*
dv['sim_mat'].shape[1],
dv['sim_mat'].shape[1])
off_ind = np.stack((mod_ind>0,mod_ind<len(dv['second_axis'])),axis=0)
off_ind = np.all(off_ind, axis=0)
scorr = np.nansum(dv['sim_mat'].ravel()[ser_ind[off_ind]])
scorr -= (len(off_ind) - np.sum(off_ind)) # values off sclae get -1
mf = -scorr/len(model) # -1 would be a perfekt fit
print x, mf
"""
corr = _model_correlation(dv, model)
mf = -np.nanmean(corr)
print mf,
return mf
def _model_correlation(dv, model):
"""
return the correlation value for velocity given by model
"""
scale = dv['second_axis'][1] - dv['second_axis'][0]
offset = dv['second_axis'][0]
mod_ind = (np.round((model-offset) / scale).astype(int))
ser_ind = mod_ind + np.arange(0,dv['sim_mat'].shape[0]*
dv['sim_mat'].shape[1],
dv['sim_mat'].shape[1])
off_ind = np.stack((mod_ind>0,mod_ind<len(dv['second_axis'])),axis=0)
off_ind = np.all(off_ind, axis=0)
ser_ind[~off_ind] = 0
corr = dv['sim_mat'].ravel()[ser_ind]
corr[~off_ind] = -1
return corr
def _misfit_L2(x, dv, dv_time, exc_time, exc, start_model):
# construct model_par
model_par = _modeldict_from_vec(start_model,x)
#print model_par
model = _forward(dv_time, exc_time, exc, model_par)
mf = np.nanmean((model - dv['value'])**2)
print mf,
return mf
def _print_model_par(model_par):
for type in sorted(model_par['type']):
print '%s:' % type
for par in model_par[type].keys():
if par not in ['to_vari','units']:
print '\t%s = %s %s' % (par,str(model_par[type][par]),
model_par[type]['units'][par])
print '\tfitted parameters: %s\n' % str(model_par[type]['to_vari'])
def substract_model(dv):
""" Substract the modelled change from the data.
"""
assert 'model_value' in dv, "You need to calculate a model first"
if 'sim_mat' in dv:
tmp_sim_mat = np.zeros_like(dv['sim_mat'])
la = len(dv['second_axis'])
cent = np.argmin(np.abs(dv['second_axis']))
for ind in range(len(dv['time'])):
shift = np.argmin(np.abs(dv['second_axis']-dv['model_value'][ind])) - cent
tmp_sim_mat[ind,max((0,-shift)):min((la,la-shift))] = \
dv['sim_mat'][ind,max((0,shift)):min((la,la+shift))]
dv['sim_mat'] = tmp_sim_mat
dv['value'] -= dv['model_value']
dv['model_value'] *= 0.
class Change:
def __init__(self):
print 'init'
self.dv = {}
def copy(self):
return deepcopy(self)
def set_dv(self, dv):
self.dv = dv
def set_excitation(self, excitation):
t = excitation.stats['starttime']
st = float(t.toordinal())*86400+float(t.hour)*3600+ \
float(t.minute)*60 + float(t.second)
dt = excitation.stats['delta']
exc_time = np.arange(st,st+dt*excitation.stats['npts'],dt)
exc_time = [st+dt*ind for ind in range(excitation.stats['npts'])]
# set time of first measurement to zero
exc_time = np.array(exc_time)# - dv_time[0]
self.excitation = excitation
self.excitation_time = exc_time
def model_forward(self, misfit_function=_misfit_int_corr):
self.dv = model_forward(self.dv, self.model_par, self.excitation,
misfit_function)
def substract_model(self):
substract_model(self.dv)
def set_start_model(self, type):
"""
'type' should be a list of the types of changes that shall be included in
the model; possibilities are 'offset', 'trend', 'cyclic', 'const_exp',
'var_exp', 'const_log', 'var_log', 'state_model', 'flat_state_model' and
'strain_rate_model'.
"""
self.start_model = {'type':type}
for this_type in type:
if this_type == 'offset':
self.start_model.update({'offset':{'offset':0.,
'to_vari':['offset'],
'units':{'offset':'1'}}})
elif this_type == 'trend':
self.start_model.update({'trend':{'trend':0.,
'to_vari':['trend'],
'units':{'trend':'1/a'}}})
elif this_type == 'cyclic':
self.start_model.update({'cyclic':{'sine':0.,'cosine':0.,
'period':365,
'to_vari':['sine',
'cosine'],
'units':{'cosine':'1',
'sine':'1',
'period':'d'}}})
elif this_type == 'const_exp':
self.start_model.update({'const_exp':{'sensitivity':1.,
'tau':1.,
'to_vari':['sensitivity',
'tau'],
'units':{'sensitivity':'1/input',
'tau':'a'}}})
elif this_type == 'var_exp':
self.start_model.update({'var_exp':{'sensitivity':1.,
'tau_scale':1.,
'to_vari':['sensitivity',
'tau_scale'],
'units':{'sensitivity':'1/input',
'tau_scale':'a/input'}}})
elif this_type == 'const_log':
self.start_model.update({'const_log':{'sensitivity':1.,
'tau_min':-1,
'tau_max':2.,
'to_vari':['sensitivity',
'tau_max'],
'units':{'sensitivity':'1/input',
'tau_min':'log10(d)',
'tau_max':'log10(d)'}}})
elif this_type == 'var_log':
self.start_model.update({'var_log':{'sensitivity':1.,
'tau_min':-1,
'tau_max_scale':2.,
'to_vari':['sensitivity',
'tau_max_scale'],
'units':{'sensitivity':'1/input',
'tau_min':'log10(d)',
'tau_max_scale':'log10(d)/input'}}})
elif this_type == 'state_model':
self.start_model.update({'state_model':{'sensitivity':1.,
'tau_min':-2,
'tau_max':3,
'Ntau':100,
'to_vari':['sensitivity','tau_max'],
'units':{'sensitivity':'1/input',
'tau_min':'log10(d)',
'tau_max':'log10(d)',
'Ntau':'1'}}})
elif this_type == 'flat_state_model':
self.start_model.update({'flat_state_model':{'sensitivity':1.,
'tau_min':-2,
'tau_max':3,
'Ntau':100,
'to_vari':['sensitivity','tau_max'],
'units':{'sensitivity':'1/input',
'tau_min':'log10(d)',
'tau_max':'log10(d)',
'Ntau':'1'}}})
elif this_type == 'strain_rate_model':
self.start_model.update({'strain_rate_model':{'sensitivity':1.,
'tau_min':-2,
'tau_max':3,
'Ntau':100,
'to_vari':['sensitivity','tau_max'],
'units':{'sensitivity':'1/input',
'tau_min':'log10(d)',
'tau_max':'log10(d)',
'Ntau':'1'}}})
def fit_model(self, misfit_function=_misfit_int_corr):
"""
'excitation' is supposed to be a obspy.trace
"""
# generate vector for starting model
x0 = _modelvec_from_dict(self.start_model)
# create time vector for dv measurements
dv_time = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(self.dv['time'])]
dv_time = np.array(dv_time)
# create time vector for excitation
mod_with_exc = ['const_exp','var_exp','const_log','var_log',
'state_model','flat_state_model','strain_rate_model']
if any(x in self.start_model['type'] for x in mod_with_exc):
assert hasattr(self,'excitation'), 'Attach an excitation first'
"""
t = excitation.stats['starttime']
st = float(t.toordinal())*86400+float(t.hour)*3600+ \
float(t.minute)*60 + float(t.second)
dt = excitation.stats['delta']
exc_time = np.arange(st,st+dt*excitation.stats['npts'],dt)
exc_time = [st+dt*ind for ind in range(excitation.stats['npts'])]
# set time of first measurement to zero
"""
exc_time = self.excitation_time - dv_time[0]
exc = self.excitation.data
else:
exc_time = []
exc = []
# set time of first measurement to zero
dv_time -= dv_time[0]
ret = fmin(misfit_function, x0, args=(self.dv, dv_time,
exc_time, exc, self.start_model),
xtol=1e-8,maxiter=1000,
full_output=True)
x = ret[0]
mf = ret[1]
self.model_par = _modeldict_from_vec(self.start_model,x)
self.dv['model_value'] = _forward(dv_time, exc_time, exc,
self.model_par)
self.dv['model_corr'] = _model_correlation(self.dv,
self.dv['model_value'])
self.dv['misfit_function'] = '%s' % misfit_function
self.dv['model_misfit'] = mf
def plot(self):
plot_dv(self.dv)
def print_start_model(self):
if not hasattr(self,'start_model'):
print 'No start_model defined.'
return
_print_model_par(self.start_model)
def print_model(self):
if not hasattr(self,'model_par'):
print 'No model defined.'
return
_print_model_par(self.model_par)
print 'Model misfit: %e\n' % self.dv['model_misfit']
def model_forward(dv,model_par,excitation=[],misfit_function=_misfit_int_corr):
# create time vector for dv measurements
#pdb.set_trace()
tdv = deepcopy(dv)
dv_time = [(float(t.toordinal())*86400+float(t.hour)*3600+float(t.minute)*60\
+float(t.second)) for t in convert_time(tdv['time'])]
dv_time = np.array(dv_time)
if excitation:
t = excitation.stats['starttime']
st = float(t.toordinal())*86400+float(t.hour)*3600+ \
float(t.minute)*60 + float(t.second)
dt = excitation.stats['delta']
exc_time = np.arange(st,st+dt*excitation.stats['npts'],dt)
exc_time = [st+dt*ind for ind in range(excitation.stats['npts'])]
# set time of first measurement to zero
exc_time = np.array(exc_time) - dv_time[0]
exc = excitation.data
else:
exc = []
exc_time = []
dv_time -= dv_time[0]
tdv['model_value'] = _forward(dv_time, exc_time, exc, model_par)
tdv['model_corr'] = _model_correlation(tdv, tdv['model_value'])
tdv['misfit_function'] = '%s' % misfit_function
mf = misfit_function(_modelvec_from_dict(model_par), tdv, dv_time, exc_time,
exc, model_par)
tdv['model_misfit'] = mf
return tdv
def _forward(dv_time, exc_time, exc, model_par):
""" Model stretching due to changes using different components
Assume all components are additive.
"""
mdv = np.zeros(len(dv_time),dtype=float)
if 'offset' in model_par['type']:
mdv += model_par['offset']['offset']
if 'trend' in model_par['type']:
mdv += model_par['trend']['trend'] * dv_time/31536000
if 'cyclic' in model_par['type']:
phase = dv_time*2*np.pi/(model_par['cyclic']['period']*86400)
mdv += model_par['cyclic']['sine']*np.sin(phase) + \
model_par['cyclic']['cosine']*np.cos(phase)
if 'const_exp' in model_par['type']:
mdv += _forward_const_exp(dv_time, exc_time, exc,
model_par['const_exp']['sensitivity'],
model_par['const_exp']['tau'])
if 'var_exp' in model_par['type']:
mdv += _forward_var_exp(dv_time, exc_time, exc,
model_par['var_exp']['sensitivity'],
model_par['var_exp']['tau_scale'])
if 'const_log' in model_par['type']:
mdv += _forward_const_log(dv_time, exc_time, exc,
model_par['const_log']['sensitivity'],
model_par['const_log']['tau_min'],
model_par['const_log']['tau_max'])
if 'var_log' in model_par['type']:
mdv += _forward_var_log(dv_time, exc_time, exc,
model_par['var_log']['sensitivity'],
model_par['var_log']['tau_min'],
model_par['var_log']['tau_max_scale'])
if 'state_model' in model_par['type']:
#pdb.set_trace()
mdv += _forward_state_model_fd(dv_time, exc_time, exc,
model_par['state_model']['exc_scale'],
model_par['state_model']['sensitivity'],
np.logspace(model_par['state_model']['tau_min'],
model_par['state_model']['tau_max'],
model_par['state_model']['Ntau']),
model_par['state_model']['p0'])
if 'flat_state_model' in model_par['type']:
#pdb.set_trace()
mdv += _forward_flat_state_model_fd(dv_time, exc_time, exc,
model_par['flat_state_model']['exc_scale'],
model_par['flat_state_model']['sensitivity'],
np.logspace(model_par['flat_state_model']['tau_min'],
model_par['flat_state_model']['tau_max'],
model_par['flat_state_model']['Ntau']),
model_par['flat_state_model']['p0'])
if 'strain_rate_model' in model_par['type']:
#pdb.set_trace()
mdv += _forward_rate_model(dv_time, exc_time, exc,
model_par['strain_rate_model']['exc_scale'],
model_par['strain_rate_model']['sensitivity'],
np.logspace(model_par['strain_rate_model']['tau_min'],
model_par['strain_rate_model']['tau_max'],
model_par['strain_rate_model']['Ntau']),
model_par['strain_rate_model']['p0'])
return mdv
def _forward_state_model(dv_time, exc_time, exc, sensitivity, taus, p0):
"""Roel's and Jack's analytic solution to the state model.
units: exc_time in seconds
tau in seconds
"""
# int 2 - alpha t
#pdb.set_trace()
tim = exc_time - exc_time[0]
t_scale = exc_time[0]
dt = np.diff(exc_time)/t_scale
dt = np.concatenate((dt,np.array([dt[-1]])))
A = np.cumsum(exc*dt)
eA = np.exp(A)
p = np.zeros((len(tim),len(taus)))
for ind,tau in enumerate(taus):
if p0[ind] < 0:
p0[ind] = exc[0]/(1./tau/t_scale + exc[0])
emFt = np.exp(tim/tau) * eA
p[:,ind] = 1 - 1./emFt * (1 - p0[ind] + 1/tau/t_scale * np.cumsum(emFt*dt))
"""
def _forward_flat_state_model_fd(dv_time, exc_time, exc, exc_scale, sensitivity, taus, p0):
texc = deepcopy(exc)*exc_scale
p = np.zeros((len(exc_time),len(taus)))
alp = 1./taus
for ind,al in enumerate(alp):
if p0[ind] < 0:
p0[ind] = texc[0]/(1+texc[0])
p[0,:] = p0
dt = np.diff(exc_time)
pequi = texc/(1+texc)
for ind,tdt in enumerate(dt):
p[ind+1,:] = pequi[ind] + (p[ind,:] - pequi[ind])*np.exp(-alp*(1.+texc[ind])*tdt)
#p /= np.tile(np.atleast_2d(alp),(len(exc),1))
import pdb
import matplotlib.pyplot as plt
#p *= np.tile(alp,())
#plt.imshow(np.log(p),aspect='auto')
#plt.colorbar()
#plt.show()
#pdb.set_trace()
#p /= np.tile(alp,(len(texc),1))
model = np.sum(p,axis=1)
model = np.interp(dv_time-dv_time[0],exc_time,model)
model -= model[0]
model /= np.max(np.abs(model))
model *= sensitivity
#plt.imshow(p,aspect='auto')
#plt.show()
return model
"""
def _forward_flat_state_model_fd(dv_time, exc_time, exc, exc_scale, sensitivity, taus, p0):
texc = deepcopy(exc)*exc_scale
p = np.zeros((len(exc_time),len(taus)))
alp = 1./taus
for ind,al in enumerate(alp):
if p0[ind] < 0:
p0[ind] = texc[0]/(1+texc[0])
p[0,:] = p0
dt = np.diff(exc_time)
pequi = texc/(1+texc)
for ind,tdt in enumerate(dt):
p[ind+1,:] = pequi[ind] + (p[ind,:] - pequi[ind])*np.exp(-alp*tdt)
p /= np.tile(alp**1.,(len(texc),1))
#import pdb
#import matplotlib.pyplot as plt
#plt.imshow(np.log(p),aspect='auto')
#plt.colorbar()
#plt.colorbar()
#plt.show()
#pdb.set_trace()
model = np.sum(p,axis=1)
model = np.interp(dv_time-dv_time[0],exc_time,model)
model -= model[0]
model /= np.max(np.abs(model))
model *= sensitivity
#plt.imshow(p,aspect='auto')
#plt.show()
return model
def _forward_rate_model(dv_time, exc_time, exc, exc_scale, sensitivity, taus, p0):
texc = deepcopy(exc)*exc_scale
p = np.zeros((len(exc_time),len(taus)))
alp = 1./taus
for ind,al in enumerate(alp):
if p0[ind] < 0:
p0[ind] = texc[0]/(1+texc[0])
p[0,:] = p0
dt = np.diff(exc_time)
pequi = texc/(1+texc)
import pdb
pdb.set_trace()
for ind,tdt in enumerate(dt):
p[ind+1,:] = p[ind,:] - p[ind,:]*(1.-np.exp(-texc[ind]*alp*tdt)) + (p[ind,:] - p0)*(1.-np.exp(-alp*tdt))
import matplotlib.pyplot as plt
plt.imshow(np.log(p))
plt.show()
#p /= np.tile(alp**0.7,(len(texc),1))
#import pdb
#import matplotlib.pyplot as plt
#plt.imshow(np.log(p),aspect='auto')
#plt.colorbar()
#plt.colorbar()
#plt.show()
#pdb.set_trace()
model = np.sum(p,axis=1)
model = np.interp(dv_time-dv_time[0],exc_time,model)
model -= model[0]
model /= np.max(np.abs(model))
model *= sensitivity
#plt.imshow(p,aspect='auto')
#plt.show()
return model
def _forward_state_model_fd(dv_time, exc_time, exc, exc_scale, sensitivity, taus, p0):
"""numerical fd solution to the state model.
units: exc_time in seconds
tau in seconds
"""
# int 2 - alpha t
#pdb.set_trace()
texc = deepcopy(exc)*exc_scale
p = np.zeros((len(exc_time),len(taus)))
alp = 1./taus
for ind,al in enumerate(alp):
if p0[ind] < 0:
p0[ind] = texc[0]/(al+texc[0])
p[0,:] = p0
dt = np.diff(exc_time)
for ind,tdt in enumerate(dt):
pequi = texc[ind]/(alp+texc[ind])
p[ind+1,:] = pequi + (p[ind,:] - pequi)*np.exp(-(alp+texc[ind])*tdt)
model = np.sum(p,axis=1)
model = np.interp(dv_time-dv_time[0],exc_time,model)
model -= model[0]
model /= np.max(np.abs(model))
model *= sensitivity
#plt.imshow(p,aspect='auto')
#plt.show()
return model
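# Note on the update rule above (inferred from the code): each column of p
# integrates dp/dt = exc*(1 - p) - p/tau exactly over one sample interval,
# i.e. p relaxes towards the equilibrium exc/(1/tau + exc) at the combined
# rate (1/tau + exc).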
def _forward_const_exp(dv_time, exc_time, exc, sensitivity, tau):
"""Expoential recovery with a constant time scales.
Exponential with constant time constant is convolved with excitation.
"""
rec = sensitivity * np.exp(-exc_time/(tau*31536000))
model = np.convolve(exc,rec,"full")
model = model[:len(exc)]
model = np.interp(dv_time,exc_time,model)
return model
def _forward_var_exp(dv_time, exc_time, exc, sensitivity, tau_scale):
"""Expoential recovery with variable time scales.
Recovery time of each excitation sample is scaled with the amplitude of
the excitation.
"""
model = np.zeros(len(dv_time))
for ii in np.arange(len(dv_time)):
t = dv_time[ii] - exc_time[exc_time<=dv_time[ii]]
texc = exc[:len(t)]
model[ii] = sensitivity * np.sum(texc * np.exp(-t/(tau_scale*31536000*texc)))
return model
def _forward_const_log(dv_time, exc_time, exc, sensitivity, tau_min, tau_max):
"""Logarithmic relaxation function with constant time scales
Excitation time series is convolved with a relaxation function generated
by integration of scaled exponential with time constats between tau_min
and tau_max
"""
tscales = np.logspace(tau_min,tau_max, num=100,endpoint=True,base=10)
rec = np.zeros(len(exc_time))
t = (exc_time - exc_time[0])/86400
for ts in tscales:
rec += np.exp(-t/ts)
rec *= sensitivity
model = np.convolve(exc,rec,"full")
model = model[:len(exc)]
model = np.interp(dv_time,exc_time,model)
model -= np.min(model)
return model
def _forward_var_log(dv_time, exc_time, exc, sensitivity, tau_min, tau_max_scale):
"""Logarithmic relaxation with excitation-dependent time scales.
For each relaxation time only excitation samples above a scale-dependent
threshold contribute, so larger excitations activate longer time scales.
"""
print(sensitivity, tau_min, tau_max_scale)
n_scales = 100
t = (exc_time - exc_time[0])/86400
tscales = np.logspace(tau_min, tau_max_scale+np.log10(np.max(exc)), num=n_scales,
endpoint=True,base=10)
exc_scales = tscales/np.power(10,tau_max_scale)
exc1 = np.zeros((1, next_pow_2(2*len(exc))))
exc1[0,:len(exc)] = exc
excm = np.tile(exc1,(n_scales,1))
expm = np.zeros_like(excm)
eexc = np.zeros(exc1.shape[1])
eexc[:len(exc)] = exc
for ind,ts in enumerate(tscales):
expm[ind,:len(exc)] = np.exp(-t/ts)
excm[ind,eexc < exc_scales[ind]] = 0
expm = np.fft.irfft(np.fft.rfft(expm,axis=1)*np.fft.rfft(excm,axis=1))
model = np.sum(expm,axis=0)
model = model[:len(exc_time)]
model = np.interp(dv_time,exc_time,model)
model *= np.power(10,sensitivity)
model -= np.min(model)
return model
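# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the sampling, amplitudes and
# parameter values below are assumptions, not taken from the original project).
# It drives the simplest forward model above with a synthetic excitation pulse.
if __name__ == '__main__':
    import numpy as np
    exc_time = np.arange(0., 365. * 86400., 86400.)     # one year, daily samples [s]
    exc = np.zeros_like(exc_time)
    exc[50] = 1.0                                        # single excitation pulse
    dv_time = exc_time[::7]                              # weekly dv/v samples
    dv = _forward_const_exp(dv_time, exc_time, exc,
                            sensitivity=-1e-3, tau=0.5)  # tau in years (scaled internally)
    print(dv[:5])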
| gpl-3.0 |
rrohan/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
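# Report the hyper-parameters selected by the grid search (added for
# illustration; best_params_ and best_score_ are standard GridSearchCV
# attributes, the print-out itself is not part of the upstream example).
print("Best parameters:", estimator.best_params_)
print("Best CV score:", estimator.best_score_)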
| bsd-3-clause |